Compare commits

...

90 Commits

Author SHA1 Message Date
Ho
7de388ef1a [Fix] Accept proof submission even it has been timeout (#1764) 2025-12-12 12:18:34 +09:00
Morty
27dd62eac3 feat(rollup-relayer): add blob fee tolerance (#1773) 2025-12-03 21:49:17 +08:00
Ho
22479a7952 [Feat] Galileo v2 (#1771)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-12-02 11:04:57 +01:00
Péter Garamvölgyi
690bc01c41 feat: force commit batches at hardfork boundary (#1768) 2025-11-30 20:36:53 +01:00
Péter Garamvölgyi
e75d6c16a9 feat: propose chunk at hardfork boundary (#1767) 2025-11-28 17:21:51 +01:00
Péter Garamvölgyi
752e4e1117 fix: Fix blob fee overflow on rollup-relayer and gas-oracle (#1772) 2025-11-28 15:44:37 +01:00
georgehao
2ecc42e2f5 Return when total < request page (#1766) 2025-11-25 23:36:40 +08:00
georgehao
de72e2dccb remove unused check (#1765) 2025-11-25 22:12:16 +08:00
georgehao
edb51236e2 bump version (#1763) 2025-11-25 21:00:57 +08:00
georgehao
15a23478d1 fix bridge history GetL2UnclaimedWithdrawalsByAddress (#1760)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-11-25 20:59:31 +08:00
Morty
9100a0bd4a fix: ci lint (#1762) 2025-11-25 17:45:05 +08:00
Morty
0ede0cd41f feat(blob-uploader): upload blob once proposed (#1759)
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
2025-11-25 14:07:08 +08:00
Ho
9dceae1ca2 [Feat] Galileo forking (#1753)
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
2025-11-24 17:37:04 +08:00
georgehao
235ba874c6 Update Galileo Dependency (#1752) 2025-11-17 18:18:48 +08:00
Zhang Zhuo
6bee33036f feat: the CLOAK privacy solution (#1737)
Co-authored-by: Ho <fan@scroll.io>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-11-14 15:00:37 +01:00
Ho
1985e54ab3 [Feat] For prover 4.6.1 (#1742) 2025-10-24 16:18:40 +09:00
Péter Garamvölgyi
bfc0fdd7ce feat: support Fusaka blob type (#1746)
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: jonastheis <4181434+jonastheis@users.noreply.github.com>
2025-10-18 08:52:29 +02:00
Jonas Theis
426c57a5fa feat(l2 relayer): enhance batch submission strategy based on backlog size (#1745) 2025-10-17 09:06:53 +08:00
Péter Garamvölgyi
b7fdf48c30 ci: Do not push latest tag on Docker images (#1744) 2025-09-30 08:40:49 +02:00
johnsonjie
ad0c918944 update prover image cuda version (#1741) 2025-09-24 09:42:19 +08:00
Ho
1098876183 [Feat] Update zkvm to 0.6 (use openvm 1.4) (#1736) 2025-09-19 10:29:13 +09:00
Jonas Theis
9e520e7769 feat(tx sender): add multiple write clients for more reliable tx submission (#1740) 2025-09-19 08:56:01 +08:00
Jonas Theis
de7f6e56a9 refactor(rollup relayer): remove max_block_num_per_chunk configuration parameter (#1729)
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
2025-09-10 14:16:13 +08:00
Ho
3b323198dc [feat] Integration e2e test tools (#1694)
Co-authored-by: georgehao <haohongfan@gmail.com>
2025-09-09 15:48:20 +09:00
Ho
c11e0283e8 [FIX] script for detecting plonky3gpu version (#1730) 2025-09-09 10:45:37 +08:00
Ho
a5a7844646 [FIX] Compatible with current prover (before 0.5.6) (#1732) 2025-09-02 19:20:02 +09:00
georgehao
7ff5b190ec bump version to v4.5.44 (#1731) 2025-09-02 09:56:59 +08:00
Ho
b297edd28d [Feat] Prover loading assets (circuits) dynamically (#1717) 2025-08-29 19:32:44 +09:00
Péter Garamvölgyi
47c85d4983 Fix unique chunk hash (#1727) 2025-08-26 14:27:24 +02:00
Morty
1552e98b79 fix(bridge-history+rollup-relayer): update da-codec to prevent zstd deadlock (#1724)
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-08-25 16:23:01 +08:00
Péter Garamvölgyi
a65b3066a3 fix: remove unnecessary logs (#1725) 2025-08-22 15:02:20 +02:00
Morty
1f2b397bbd feat(bridge-history): add aws s3 blob client (#1716)
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-08-12 14:59:44 +08:00
colin
ae791a0714 fix(rollup-relayer): sanity checks (#1720) 2025-08-12 14:57:02 +08:00
colin
c012f7132d feat(rollup-relayer): add sanity checks before committing and finalizing (#1714)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-08-11 17:49:29 +08:00
Jonas Theis
6897cc54bd feat(permissionless batches): batch production toolkit and operator recovery (#1555)
Signed-off-by: noelwei <fan@scroll.io>
Co-authored-by: Ömer Faruk Irmak <omerfirmak@gmail.com>
Co-authored-by: noelwei <fan@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Morty <70688412+yiweichi@users.noreply.github.com>
Co-authored-by: omerfirmak <omerfirmak@users.noreply.github.com>
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: kunxian xia <xiakunxian130@gmail.com>
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Morty <yiweichi1@gmail.com>
2025-08-04 12:37:31 +08:00
georgehao
d21fa36803 change l2watcher from w.GetBlockByNumberOrHash to BlockByNumber (#1715) 2025-07-31 18:29:50 +08:00
colin
fc75299eb3 fix(gas-oracle): nonce too low when resubmission (#1712) 2025-07-30 14:50:41 +08:00
Morty
4bfcd35d0c fix(bridge-history): update dependency go-ethereum version (#1713) 2025-07-30 14:32:12 +08:00
colin
6d62f8e5fa fix(gas-oracle): typos in config file example (#1711) 2025-07-28 18:12:50 +08:00
Morty
392ae07736 feat(blob-uploader): support codec v8 (#1707) 2025-07-24 01:34:46 +08:00
colin
db80b47820 fix(rollup-relayer): upgrade boundary message queue hash initialization (#1706) 2025-07-23 18:51:56 +08:00
Zhang Zhuo
daa1387208 circuit-0.5.2 (#1705)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2025-07-23 14:16:16 +08:00
Zhang Zhuo
67b05558e2 upgrade circuit to 0.5.2 (#1703)
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2025-07-23 10:52:08 +08:00
Ho
1e447b0fef [Fix] building failure in gpu image (#1702) 2025-07-21 20:26:39 +08:00
georgehao
f7c6ecadf4 bump to v4.5.31 (#1700) 2025-07-18 16:41:59 +08:00
Ho
9d94f943e5 [Upgrade] feynman 0.5.0rc1 (#1699) 2025-07-18 15:57:31 +08:00
Morty
de17ad43ff fix(blob-uploader): orm function InsertOrUpdateBlobUpload and s3 bucket region configuration (#1679)
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
2025-07-16 18:36:27 +08:00
colin
4233ad928c feat(rollup-relayer): support Validium (#1693)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-07-09 15:02:54 +08:00
georgehao
3050ccb40f update feynman prover makefile (#1691) 2025-07-05 10:33:08 +08:00
Ho
12e89201a1 feat: upgrading for feynman (#1690)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2025-07-04 16:59:48 +08:00
colin
a0ee508bbd fix(bridge-history): commit batch txns and blobs fetching (#1689) 2025-07-04 01:50:52 +08:00
georgehao
b8909d3795 update intermediate docker runs on (#1688) 2025-07-03 21:52:24 +08:00
Ho
b7a172a519 feat: upgrade zkvm-prover to feynman fork (#1686)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2025-07-03 18:48:33 +08:00
georgehao
80807dbb75 Feat/upgrade intermedidate (#1687) 2025-07-03 10:08:03 +08:00
georgehao
a776ca7c82 refactor: remove unused check (#1685) 2025-07-03 09:21:33 +08:00
Ho
ea38ae7e96 Refactor/zkvm 3 (#1684) 2025-07-01 06:39:27 +08:00
colin
9dc57c6126 feat(rollup-relayer): support codecv8 (#1681)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-06-26 21:17:56 +08:00
Ho
9367565a31 [Refactor] Universal task (#1680)
Co-authored-by: georgehao <haohongfan@gmail.com>
2025-06-25 14:42:51 +08:00
Ho
d2f7663d26 [Refactor] For universal prover task (the update on coordinator) (#1682)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2025-06-19 14:54:08 +09:00
Morty
b0943b1035 refactor(rollup-relayer): simplify construct commit batch payload (#1673) 2025-06-18 21:37:29 +08:00
Morty
5d6b5a89f4 feat(blob-uploader): blob_upload table add batch hash (#1677) 2025-06-11 18:21:14 +08:00
colin
4ee459a602 fix(gas-oracle & rollup-relayer): blob base fee calculation based on excessBlobGas field (#1676)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-06-10 17:38:21 +08:00
Morty
276385fd0a feat: add blob storage service (#1672)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
2025-06-10 17:16:16 +08:00
colin
82fb15de3b fix(rollup-relayer): add transaction support to insert l2 blocks (#1674) 2025-06-05 21:27:27 +08:00
colin
5204ad50e0 fix(bridge-history): unclaimed withdrawals (#1671) 2025-05-28 00:54:27 +08:00
colin
f824fb0efc fix(coordinator): remove initialize euclid VKs (#1669) 2025-05-26 15:42:26 +08:00
colin
a55c7bdc77 refactor(coordinator): remove outdated logic (#1668)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-05-26 14:32:39 +08:00
Alejandro Ranchal-Pedrosa
47b1a037a9 Fix bug sequencer submission strategy and log commit price (#1664)
Co-authored-by: ranchalp <ranchalp@users.noreply.github.com>
2025-05-23 10:02:49 +01:00
Alejandro Ranchal-Pedrosa
ae34020c34 perf(relayer): submission strategy fix logs, use blocktime for submission strategy and log metrics. (#1663)
Co-authored-by: ranchalp <ranchalp@users.noreply.github.com>
Co-authored-by: Jonas Theis <4181434+jonastheis@users.noreply.github.com>
2025-05-22 20:38:10 +08:00
Jonas Theis
fa9fab6e98 fix(relayer): ProcessPendingBatches (#1661)
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2025-05-21 18:23:04 +02:00
colin
c4f869a33a refactor(sender): remove fallback gas limit (#1662) 2025-05-21 22:02:27 +08:00
colin
0cee9a51e6 refactor(rollup-relayer): simplify logic post-Euclid (#1658)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-05-21 17:59:49 +08:00
colin
97de988228 feat: coordinator and prover support v0.4.2 (#1660)
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Zhang Zhuo <mycinbrin@gmail.com>
2025-05-20 14:10:52 +08:00
Alejandro Ranchal-Pedrosa
a12175dafc perf(relayer): add sequencer submission strategy with blob‐fee history and target price (#1659)
Co-authored-by: jonastheis <4181434+jonastheis@users.noreply.github.com>
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
2025-05-19 22:32:28 +02:00
colin
fedfa04c2b refactor(coordinator): simplify logic post-Euclid (#1652) 2025-05-16 23:37:52 +08:00
colin
0d8b00c3de fix(gas-oracle): nil pointer error and allow update gas price when no diff (#1657) 2025-05-12 12:35:42 +08:00
colin
826357ab5d fix(rollup-relayer): update commit status logic (#1656) 2025-05-10 18:46:36 +08:00
georgehao
d26381cba3 upgrade cargo chef to 0.1.71 (#1655) 2025-05-08 21:40:25 +08:00
georgehao
0e65686ce4 upgrade intermdiate rust to 1.86.0 (#1654) 2025-05-08 19:30:13 +08:00
georgehao
6dd878eaca upgrade intermediate rust version (#1653) 2025-05-08 17:57:15 +08:00
colin
a18fe06440 feat: openvm v0.3.0 (#1648)
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-24 22:06:35 +08:00
colin
3ac69bec51 feat(rollup-relayer): add a tool to analyze chunk/batch/bundle proposing (#1645)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-23 13:41:03 +08:00
colin
e80f030246 feat(gas-oracle): add a warn log when entering fallback mode (#1646) 2025-04-16 18:05:14 +08:00
colin
a77f1413ee chore(gas-oracle): remove unused code (#1644) 2025-04-14 16:36:27 +08:00
colin
5b62692098 chore(gas-oracle): decommission L2 gas price oracle (#1643) 2025-04-14 15:35:23 +08:00
Morty
4f34e90f00 fix(zkvm-prover): update scroll-proving-sdk (#1642) 2025-04-14 13:38:52 +08:00
colin
6b1b822c81 fix(coordinator): set v4.4.56 as minimum prover version (#1641)
Co-authored-by: Morty <yiweichi1@gmail.com>
2025-04-11 16:35:39 +08:00
colin
a34c01d90b fix(coordinator): support darwin chunk provers (#1640)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-09 19:04:20 +08:00
colin
0578aab3ae feat: openvm euclid v2 (#1613)
Signed-off-by: noelwei <fan@scroll.io>
Co-authored-by: jonastheis <4181434+jonastheis@users.noreply.github.com>
Co-authored-by: Ömer Faruk Irmak <omerfirmak@gmail.com>
Co-authored-by: noelwei <fan@scroll.io>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Morty <70688412+yiweichi@users.noreply.github.com>
Co-authored-by: omerfirmak <omerfirmak@users.noreply.github.com>
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: kunxian xia <xiakunxian130@gmail.com>
Co-authored-by: Morty <yiweichi1@gmail.com>
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-04 16:44:09 +08:00
colin
55dfbf6735 fix(rollup-relayer): rollup status overwrite (#1638) 2025-04-01 14:16:56 +08:00
281 changed files with 17114 additions and 21257 deletions

View File

@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-12-06
toolchain: nightly-2025-08-18
override: true
components: rustfmt, clippy
- name: Install Go
@@ -41,12 +41,12 @@ jobs:
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Lint
working-directory: 'common'
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
workspaces: ". -> target"
# - name: Lint
# working-directory: 'common'
# run: |
# rm -rf $HOME/.cache/golangci-lint
# make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest

View File

@@ -33,7 +33,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-12-03
toolchain: nightly-2025-08-18
override: true
components: rustfmt, clippy
- name: Install Go
@@ -112,7 +112,7 @@ jobs:
- name: Test coordinator packages
working-directory: 'coordinator'
run: |
# go test -exec "env LD_LIBRARY_PATH=${PWD}/verifier/lib" -v -race -gcflags="-l" -ldflags="-s=false" -coverpkg="scroll-tech/coordinator" -coverprofile=coverage.txt -covermode=atomic ./...
make libzkp
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic -tags mock_verifier ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3

View File

@@ -10,7 +10,8 @@ env:
jobs:
gas_oracle:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -50,12 +51,11 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup_relayer:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -95,12 +95,55 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
blob_uploader:
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: blob-uploader
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: blob-uploader
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/blob_uploader.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
rollup-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -140,12 +183,11 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-fetcher:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -185,12 +227,11 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -230,12 +271,11 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-db-cli:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -275,12 +315,11 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-api:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -319,12 +358,11 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-cron:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -364,6 +402,4 @@ jobs:
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

View File

@@ -38,6 +38,7 @@ jobs:
make dev_docker
make -C rollup mock_abi
make -C common/bytecode all
make -C coordinator/internal/logic/libzkp build
- name: Run integration tests
run: |
go test -v -tags="mock_prover mock_verifier" -p 1 -coverprofile=coverage.txt scroll-tech/integration-test/...

View File

@@ -22,9 +22,9 @@ on:
required: true
type: choice
options:
- nightly-2023-12-03
- nightly-2022-12-10
default: "nightly-2023-12-03"
- 1.86.0
- nightly-2025-08-18
default: "nightly-2025-08-18"
PYTHON_VERSION:
description: "Python version"
required: false
@@ -39,7 +39,8 @@ on:
options:
- "11.7.1"
- "12.2.2"
default: "11.7.1"
- "12.9.1"
default: "12.9.1"
CARGO_CHEF_TAG:
description: "Cargo chef version"
required: true
@@ -47,6 +48,7 @@ on:
type: choice
options:
- 0.1.41
- 0.1.71
BASE_IMAGE:
description: "which intermediate image you want to update"
required: true
@@ -67,7 +69,8 @@ defaults:
jobs:
build-and-publish-intermediate:
runs-on: ubuntu-latest
runs-on:
group: scroll-reth-runner-group
steps:
- name: Checkout code
uses: actions/checkout@v4

View File

@@ -1,99 +0,0 @@
name: Prover
on:
push:
branches:
- main
- staging
- develop
- alpha
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
pull_request:
types:
- opened
- reopened
- synchronize
- ready_for_review
paths:
- 'prover/**'
- '.github/workflows/prover.yml'
defaults:
run:
working-directory: 'prover'
jobs:
skip_check:
runs-on: ubuntu-latest
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5
with:
cancel_others: 'true'
concurrent_skipping: 'same_content_newer'
paths_ignore: '["**/README.md"]'
fmt:
needs: [skip_check]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: rustfmt
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Cargo check
run: cargo check --all-features
- name: Cargo fmt
run: cargo fmt --all -- --check
clippy:
needs: [skip_check, fmt]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: clippy
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Run clippy
run: cargo clippy --all-features --all-targets -- -D warnings
compile:
needs: [skip_check, clippy]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Test
run: |
make prover

.gitignore (vendored): 1 changed line
View File

@@ -24,3 +24,4 @@ sftp-config.json
*~
target
zkvm-prover/config.json

View File

@@ -11,7 +11,7 @@ Please note that this project is released with a [Contributor Code of Conduct][c
## Contribute to Scroll
Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
## Issues and PRs

File diff suppressed because it is too large.

Cargo.toml (new file): 67 changed lines
View File

@@ -0,0 +1,67 @@
[workspace]
members = [
"crates/libzkp",
"crates/l2geth",
"crates/libzkp_c",
"crates/prover-bin",
]
resolver = "2"
[workspace.package]
authors = ["Scroll developers"]
edition = "2021"
homepage = "https://scroll.io"
readme = "README.md"
repository = "https://github.com/scroll-tech/scroll"
version = "4.7.1"
[workspace.dependencies]
scroll-zkvm-prover = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-verifier = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
scroll-zkvm-types = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.7.1" }
sbv-primitives = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll", "rkyv"] }
sbv-utils = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2" }
sbv-core = { git = "https://github.com/scroll-tech/stateless-block-verifier", tag = "scroll-v91.2", features = ["scroll"] }
metrics = "0.23.0"
metrics-util = "0.17"
metrics-tracing-context = "0.16.0"
anyhow = "1.0"
alloy = { version = "1", default-features = false }
alloy-primitives = { version = "1.4.1", default-features = false, features = ["tiny-keccak"] }
# also use this to trigger "serde" feature for primitives
alloy-serde = { version = "1", default-features = false }
serde = { version = "1", default-features = false, features = ["derive"] }
serde_json = { version = "1.0" }
serde_derive = "1.0"
serde_with = "3"
itertools = "0.14"
tiny-keccak = "2.0"
tracing = "0.1"
eyre = "0.6"
once_cell = "1.20"
base64 = "0.22"
[patch.crates-io]
revm = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-bytecode = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-context = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-context-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-database = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-database-interface = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-handler = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-inspector = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-interpreter = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-precompile = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-primitives = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
revm-state = { git = "https://github.com/scroll-tech/revm", tag = "scroll-v91" }
[profile.maxperf]
inherits = "release"
lto = "fat"
codegen-units = 1

View File

@@ -1,6 +1,6 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update
L2GETH_TAG=scroll-v5.8.23
L2GETH_TAG=scroll-v5.9.17
help: ## Display this help message
@grep -h \

View File

@@ -28,7 +28,7 @@ We welcome community contributions to this repository. Before you submit any iss
## Prerequisites
+ Go 1.21
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ Rust (for version, see [rust-toolchain](./rust-toolchain))
+ Hardhat / Foundry
+ Docker

View File

@@ -10,15 +10,18 @@ require (
github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950
github.com/stretchr/testify v1.9.0
github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
// Hotfix for header hash incompatibility issue.
// PR: https://github.com/scroll-tech/go-ethereum/pull/1133/
// CAUTION: Requires careful handling. When upgrading go-ethereum, ensure this fix remains up-to-date in this branch.
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b
require (
dario.cat/mergo v1.0.0 // indirect
@@ -30,10 +33,10 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.13.0 // indirect
github.com/consensys/bavard v0.1.27 // indirect
github.com/consensys/gnark-crypto v0.16.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
@@ -41,7 +44,7 @@ require (
github.com/docker/docker v26.1.0+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
@@ -98,7 +101,7 @@ require (
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/supranational/blst v0.3.15 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
@@ -110,7 +113,7 @@ require (
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.21.0 // indirect

View File

@@ -53,16 +53,16 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc=
github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
github.com/consensys/bavard v0.1.27 h1:j6hKUrGAy/H+gpNrpLU3I26n1yc+VMGmd6ID5+gAhOs=
github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -88,8 +88,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -214,8 +214,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54 h1:qVpsVu1J91opTn6HYeuzWcBRVhQmPR8g05i+PlOjlI4=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b h1:pMQKnroJoS/FeL1aOWkz7/u1iBHUP8PWjZstNuzoUGE=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092359-25d5bf6b817b/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
@@ -341,10 +341,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
@@ -387,8 +387,8 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

View File

@@ -38,6 +38,7 @@ type FetcherConfig struct {
BeaconNodeAPIEndpoint string `json:"BeaconNodeAPIEndpoint"`
BlobScanAPIEndpoint string `json:"BlobScanAPIEndpoint"`
BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
AwsS3Endpoint string `json:"AwsS3Endpoint"`
}
// RedisConfig redis config

View File

@@ -39,6 +39,9 @@ type L1MessageFetcher struct {
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
blobClient := blob_client.NewBlobClients()
if cfg.AwsS3Endpoint != "" {
blobClient.AddBlobClient(blob_client.NewAwsS3Client(cfg.AwsS3Endpoint))
}
if cfg.BeaconNodeAPIEndpoint != "" {
beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
if err != nil {

View File

@@ -361,7 +361,6 @@ func getTxHistoryInfoFromBridgeBatchDepositMessage(message *orm.BridgeBatchDepos
func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pageNum, pageSize uint64) ([]*types.TxHistoryInfo, uint64, bool, error) {
start := int64((pageNum - 1) * pageSize)
end := start + int64(pageSize) - 1
total, err := h.redis.ZCard(ctx, cacheKey).Result()
if err != nil {
log.Error("failed to get zcard result", "error", err)
@@ -372,6 +371,10 @@ func (h *HistoryLogic) getCachedTxsInfo(ctx context.Context, cacheKey string, pa
return nil, 0, false, nil
}
if start >= total {
return nil, 0, false, nil
}
values, err := h.redis.ZRevRange(ctx, cacheKey, start, end).Result()
if err != nil {
log.Error("failed to get zrange result", "error", err)
@@ -450,5 +453,6 @@ func (h *HistoryLogic) processAndCacheTxHistoryInfo(ctx context.Context, cacheKe
log.Error("cache miss after write, expect hit", "cached key", cacheKey, "page", page, "page size", pageSize, "error", err)
return nil, 0, err
}
return pagedTxs, total, nil
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"math/big"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
@@ -252,6 +253,11 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
// Key: commit transaction hash
// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
txBlobIndexMap := make(map[common.Hash][]common.Hash)
// Cache for the previous transaction to avoid duplicate fetches
var lastTxHash common.Hash
var lastTx *types.Transaction
var l1BatchEvents []*orm.BatchEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
@@ -261,11 +267,28 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
log.Error("Failed to unpack CommitBatch event", "err", err)
return nil, err
}
commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
if err != nil || isPending {
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err
// Get transaction, reuse if it's the same as previous
var commitTx *types.Transaction
if lastTxHash == vlog.TxHash && lastTx != nil {
commitTx = lastTx
} else {
log.Debug("Fetching commit batch transaction", "txHash", vlog.TxHash.String())
// Create 10-second timeout context for transaction fetch
txCtx, txCancel := context.WithTimeout(ctx, 10*time.Second)
fetchedTx, isPending, err := client.TransactionByHash(txCtx, vlog.TxHash)
txCancel()
if err != nil || isPending {
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err
}
commitTx = fetchedTx
lastTxHash = vlog.TxHash
lastTx = commitTx
}
version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
if err != nil {
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
@@ -305,7 +328,13 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
}
blocks, err := e.getBatchBlockRangeFromBlob(ctx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
log.Debug("Processing blob data", "blobVersionedHash", blobVersionedHash.String(), "batchIndex", event.BatchIndex.Uint64(), "currentIndex", currentIndex)
// Create 20-second timeout context for blob processing
blobCtx, blobCancel := context.WithTimeout(ctx, 20*time.Second)
blocks, err := e.getBatchBlockRangeFromBlob(blobCtx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
blobCancel()
if err != nil {
return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
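
The reworked loop above does two things: it reuses the previously fetched commit transaction when consecutive CommitBatch events share a transaction hash, and it wraps each transaction and blob fetch in a short context.WithTimeout so a stalled RPC call cannot hang the parser. Below is a self-contained sketch of that cache-plus-timeout pattern, with a hypothetical fetcher type standing in for the real ethclient call.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// txFetcher is a hypothetical stand-in for an RPC client's
// TransactionByHash-style lookup.
type txFetcher func(ctx context.Context, hash string) (string, error)

// fetchWithCache resolves the transaction for each event, reusing the previous
// result when consecutive events share a hash and bounding every RPC call to
// ten seconds, mirroring the hunk above.
func fetchWithCache(ctx context.Context, hashes []string, fetch txFetcher) ([]string, error) {
	var (
		lastHash string
		lastTx   string
		out      []string
	)
	for _, h := range hashes {
		if h == lastHash && lastTx != "" {
			// Same transaction as the previous event: skip the RPC round trip.
			out = append(out, lastTx)
			continue
		}
		callCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
		tx, err := fetch(callCtx, h)
		cancel()
		if err != nil {
			return nil, fmt.Errorf("fetch tx %s: %w", h, err)
		}
		lastHash, lastTx = h, tx
		out = append(out, tx)
	}
	return out, nil
}

func main() {
	calls := 0
	fetch := func(ctx context.Context, hash string) (string, error) {
		calls++
		return "tx-for-" + hash, nil
	}
	txs, _ := fetchWithCache(context.Background(), []string{"a", "a", "b"}, fetch)
	fmt.Println(txs, "RPC calls:", calls) // 3 results, 2 RPC calls
}
```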

View File

@@ -154,10 +154,10 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
db := c.db.WithContext(ctx)
db = db.Model(&CrossMessage{})
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Where("tx_status = ?", types.TxStatusTypeSent)
db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
db = db.Where("sender = ?", sender)
db = db.Order("block_timestamp desc")
db = db.Limit(500)
db = db.Limit(10000)
if err := db.Find(&messages).Error; err != nil {
return nil, fmt.Errorf("failed to get L2 claimable withdrawal messages by sender address, sender: %v, error: %w", sender, err)
}
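
The widened WHERE clause above counts withdrawals whose relay attempt failed or reverted as still unclaimed, and the row cap rises from 500 to 10000. An in-memory sketch of the same status filter follows; the TxStatus constants are hypothetical stand-ins for the actual types package values.

```go
package main

import "fmt"

// TxStatus is a hypothetical enum mirroring the bridge-history tx_status values.
type TxStatus int

const (
	TxStatusSent TxStatus = iota
	TxStatusFailedRelayed
	TxStatusRelayTxReverted
	TxStatusRelayed
)

// claimableStatuses reflects the widened filter: a withdrawal is still
// unclaimed if it was only sent, or if a relay attempt failed or reverted.
var claimableStatuses = map[TxStatus]bool{
	TxStatusSent:            true,
	TxStatusFailedRelayed:   true,
	TxStatusRelayTxReverted: true,
}

type withdrawal struct {
	Sender string
	Status TxStatus
}

func unclaimedFor(sender string, all []withdrawal) []withdrawal {
	var out []withdrawal
	for _, w := range all {
		if w.Sender == sender && claimableStatuses[w.Status] {
			out = append(out, w)
		}
	}
	return out
}

func main() {
	ws := []withdrawal{
		{"0xabc", TxStatusSent},
		{"0xabc", TxStatusRelayed},
		{"0xabc", TxStatusRelayTxReverted},
	}
	fmt.Println(len(unclaimedFor("0xabc", ws))) // 2: the relayed withdrawal is excluded
}
```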

View File

@@ -0,0 +1,30 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build blob_uploader
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/blob_uploader/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/blob_uploader
# Pull blob_uploader into a second stage deploy ubuntu container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/blob_uploader /bin/
WORKDIR /app
ENTRYPOINT ["blob_uploader"]

View File

@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*

View File

@@ -1,22 +1,25 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as chef
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as chef
WORKDIR app
FROM chef as planner
COPY ./common/libzkp/impl/ .
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY ./rust-toolchain ./
RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY ./rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .
RUN cargo build --release
COPY ./crates/ ./crates/
COPY ./Cargo.* ./
COPY .git .git
RUN cargo build --release -p libzkp-c
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
FROM scrolltech/go-rust-builder:go-1.22.12-rust-nightly-2025-02-14 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
@@ -31,9 +34,9 @@ RUN go mod download -x
# Build coordinator
FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/libzkp/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api && mv ./build/bin/coordinator_api /bin/coordinator_api
RUN mv coordinator/internal/logic/libzkp/lib /bin/
# Pull coordinator into a second stage deploy ubuntu container
FROM ubuntu:20.04

View File

@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -4,3 +4,5 @@ docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -1,5 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*
permissionless-batches/conf/

View File

@@ -18,6 +18,6 @@ RUN cd /src/zkvm-prover && make prover
FROM ubuntu:24.04 AS runtime
COPY --from=builder /src/zkvm-prover/target/release/prover /usr/local/bin/
COPY --from=builder /src/target/release/prover /usr/local/bin/
ENTRYPOINT ["prover"]

View File

@@ -0,0 +1,30 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build rollup_relayer
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/permissionless_batches/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy ubuntu container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/rollup_relayer /bin/
WORKDIR /app
ENTRYPOINT ["rollup_relayer"]

View File

@@ -0,0 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
permissionless-batches/conf/

View File

@@ -1,5 +1,8 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
*target/*
permissionless-batches/conf/

common/.gitignore (vendored): 3 changed lines
View File

@@ -1,4 +1,3 @@
/build/bin
.idea
libzkp/impl/target
libzkp/interface/*.a
libzkp

View File

@@ -4,5 +4,4 @@ test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
cd libzkp/impl && cargo fmt --all -- --check && cargo clippy --release -- -D warnings
GOBIN=$(PWD)/build/bin go run ../build/lint.go

View File

@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
elapsed := time.Since(begin)
sql, rowsAffected := fc()
g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}
// InitDB init the db handler

View File

@@ -15,7 +15,7 @@ require (
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/stretchr/testify v1.10.0
github.com/testcontainers/testcontainers-go v0.30.0
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
@@ -64,7 +64,7 @@ require (
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/cpuguy83/dockercfg v0.3.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/distribution/reference v0.5.0 // indirect
@@ -79,7 +79,7 @@ require (
github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsevents v0.1.1 // indirect
@@ -184,7 +184,7 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 // indirect
github.com/scroll-tech/da-codec v0.10.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
@@ -198,7 +198,7 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.4.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/supranational/blst v0.3.15 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect

View File

@@ -155,8 +155,8 @@ github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoY
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -214,8 +214,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
@@ -636,10 +636,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
@@ -707,8 +707,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=

File diff suppressed because it is too large.

View File

@@ -1,34 +0,0 @@
[package]
name = "zkp"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib"]
[patch.crates-io]
# patched add rkyv support & MSRV 1.77
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.21" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
[dependencies]
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.1.0-rc.6", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.1.0-rc.6", package = "scroll-zkvm-verifier" }
base64 = "0.13.0"
env_logger = "0.9.0"
libc = "0.2"
log = "0.4"
once_cell = "1.19"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
anyhow = "1.0.86"
[profile.test]
opt-level = 3
[profile.release]
opt-level = 3


@@ -1,11 +0,0 @@
.PHONY: help fmt clippy test test-ci test-all
build:
@cargo build --release
fmt:
@cargo fmt --all -- --check
clippy:
@cargo check --all-features
@cargo clippy --release -- -D warnings


@@ -1 +0,0 @@
nightly-2024-12-06


@@ -1,82 +0,0 @@
mod utils;
mod verifier;
use std::path::Path;
use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use verifier::{TaskType, VerifierConfig};
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init(config: *const c_char) {
let config_str = c_char_to_str(config);
let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
verifier::init(verifier_config);
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Chunk)
}
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
// Skip verification for darwinV2 as we can't host darwinV2 and euclid verifiers on the same
// binary.
if fork_name_str == "darwinV2" {
return true as c_char;
}
let proof = c_char_to_vec(proof);
let verifier = verifier::get_verifier(fork_name_str);
if let Err(e) = verifier {
log::warn!("failed to get verifier, error: {:#}", e);
return 0 as c_char;
}
match verifier.unwrap().verify(task_type, proof) {
Err(e) => {
log::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Batch)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Bundle)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
_dump_vk(fork_name, file);
}
fn _dump_vk(fork_name: *const c_char, file: *const c_char) {
let fork_name_str = c_char_to_str(fork_name);
let verifier = verifier::get_verifier(fork_name_str);
if let Ok(verifier) = verifier {
verifier.as_ref().dump_vk(Path::new(c_char_to_str(file)));
}
}


@@ -1,27 +0,0 @@
use std::{
ffi::CStr,
os::raw::c_char,
panic::{catch_unwind, AssertUnwindSafe},
};
pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_str().unwrap()
}
pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_bytes().to_vec()
}
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
if let Some(s) = err.downcast_ref::<String>() {
s.to_string()
} else if let Some(s) = err.downcast_ref::<&str>() {
s.to_string()
} else {
format!("unable to get panic info {err:?}")
}
})
}


@@ -1,76 +0,0 @@
#![allow(static_mut_refs)]
mod euclid;
use anyhow::{bail, Result};
use euclid::EuclidVerifier;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TaskType {
Chunk,
Batch,
Bundle,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VKDump {
pub chunk_vk: String,
pub batch_vk: String,
pub bundle_vk: String,
}
pub trait ProofVerifier {
fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
fn dump_vk(&self, file: &Path);
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
pub fork_name: String,
pub params_path: String,
pub assets_path: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VerifierConfig {
pub low_version_circuit: CircuitConfig,
pub high_version_circuit: CircuitConfig,
}
type HardForkName = String;
struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
pub fn init(config: VerifierConfig) {
let verifier = EuclidVerifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_LOW
.set(VerifierPair(
config.high_version_circuit.fork_name,
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
}
pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
unsafe {
if let Some(verifier) = VERIFIER_LOW.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
if let Some(verifier) = VERIFIER_HIGH.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
}
bail!("failed to get verifier, key not found, {}", fork_name)
}


@@ -1,65 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifier, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidVerifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifier,
}
impl EuclidVerifier {
pub fn new(assets_dir: &str) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}
}
impl ProofVerifier for EuclidVerifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
}
fn dump_vk(&self, file: &Path) {
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
}


@@ -1,12 +0,0 @@
// BatchVerifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
void init(char* config);
char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);
char verify_chunk_proof(char* proof, char* fork_name);
void dump_vk(char* fork_name, char* file);


@@ -34,7 +34,7 @@ services:
# Sets up the genesis configuration for the go-ethereum client from a JSON file.
geth-genesis:
image: "ethereum/client-go:v1.13.14"
image: "ethereum/client-go:v1.14.0"
command: --datadir=/data/execution init /data/execution/genesis.json
volumes:
- data:/data
@@ -80,7 +80,7 @@ services:
# Runs the go-ethereum execution client with the specified, unlocked account and necessary
# APIs to allow for proof-of-stake consensus via Prysm.
geth:
image: "ethereum/client-go:v1.13.14"
image: "ethereum/client-go:v1.14.0"
command:
- --http
- --http.api=eth,net,web3


@@ -1,4 +1,4 @@
FROM ethereum/client-go:v1.13.14
FROM ethereum/client-go:v1.14.0
COPY password /l1geth/
COPY genesis.json /l1geth/


@@ -10,6 +10,7 @@ import (
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/compose"
"github.com/testcontainers/testcontainers-go/modules/postgres"
@@ -166,13 +167,13 @@ func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
return contrainer.PortEndpoint(context.Background(), "8545/tcp", "http")
}
// GetPoSL1Client returns a ethclient by dialing running PoS L1 client
func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
// GetPoSL1Client returns a raw rpc client by dialing the L1 node
func (t *TestcontainerApps) GetPoSL1Client() (*rpc.Client, error) {
endpoint, err := t.GetPoSL1EndPoint()
if err != nil {
return nil, err
}
return ethclient.Dial(endpoint)
return rpc.Dial(endpoint)
}
// GetDBEndPoint returns the endpoint of the running postgres container
@@ -220,11 +221,20 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
// GetL2GethClient returns an ethclient by dialing the running L2Geth
func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
rpcCli, err := t.GetL2Client()
if err != nil {
return nil, err
}
return ethclient.NewClient(rpcCli), nil
}
// GetL2Client returns a rpc client by dialing the running L2Geth
func (t *TestcontainerApps) GetL2Client() (*rpc.Client, error) {
endpoint, err := t.GetL2GethEndPoint()
if err != nil {
return nil, err
}
client, err := ethclient.Dial(endpoint)
client, err := rpc.Dial(endpoint)
if err != nil {
return nil, err
}
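
The accessors above now return a raw *rpc.Client instead of an *ethclient.Client. As an illustrative sketch (not part of this diff; the endpoint is a placeholder for the value returned by GetPoSL1EndPoint or GetL2GethEndPoint), a caller that still needs typed Ethereum calls can wrap the raw client with ethclient.NewClient, the same wrapping GetL2GethClient performs above:

package main

import (
    "context"
    "fmt"

    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/rpc"
)

func main() {
    // Dial the node over HTTP; replace the endpoint with the container's port endpoint.
    rawClient, err := rpc.Dial("http://localhost:8545")
    if err != nil {
        panic(err)
    }
    // Wrap the raw client to regain the typed ethclient API where it is still needed.
    ethCli := ethclient.NewClient(rawClient)
    blockNum, err := ethCli.BlockNumber(context.Background())
    if err != nil {
        panic(err)
    }
    fmt.Println("latest block:", blockNum)
}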


@@ -3,7 +3,6 @@ package testcontainers
import (
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
)
@@ -14,7 +13,6 @@ func TestNewTestcontainerApps(t *testing.T) {
err error
endpoint string
gormDBclient *gorm.DB
ethclient *ethclient.Client
)
testApps := NewTestcontainerApps()
@@ -32,17 +30,17 @@ func TestNewTestcontainerApps(t *testing.T) {
endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetL2GethClient()
l2RawClient, err := testApps.GetL2Client()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NotNil(t, l2RawClient)
assert.NoError(t, testApps.StartPoSL1Container())
endpoint, err = testApps.GetPoSL1EndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetPoSL1Client()
l1RawClient, err := testApps.GetPoSL1Client()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NotNil(t, l1RawClient)
assert.NoError(t, testApps.StartWeb3SignerContainer(1))
endpoint, err = testApps.GetWeb3SignerEndpoint()

common/testdata/blobdata.json vendored Normal file

File diff suppressed because one or more lines are too long


@@ -276,8 +276,8 @@ const (
SenderTypeFinalizeBatch
// SenderTypeL1GasOracle indicates a sender from L2 responsible for updating L1 gas prices.
SenderTypeL1GasOracle
// SenderTypeL2GasOracle indicates a sender from L1 responsible for updating L2 gas prices.
SenderTypeL2GasOracle
// SenderTypeL2GasOracleDeprecated indicates a sender from L1 responsible for updating L2 gas prices, which is deprecated.
SenderTypeL2GasOracleDeprecated
)
// String returns a string representation of the SenderType.
@@ -289,8 +289,8 @@ func (t SenderType) String() string {
return "SenderTypeFinalizeBatch"
case SenderTypeL1GasOracle:
return "SenderTypeL1GasOracle"
case SenderTypeL2GasOracle:
return "SenderTypeL2GasOracle"
case SenderTypeL2GasOracleDeprecated:
return "SenderTypeL2GasOracleDeprecated"
default:
return fmt.Sprintf("Unknown SenderType (%d)", int32(t))
}
@@ -326,3 +326,53 @@ func (s TxStatus) String() string {
return fmt.Sprintf("Unknown TxStatus (%d)", int32(s))
}
}
// BlobUploadStatus represents the status of a blob upload
type BlobUploadStatus int
const (
// BlobUploadStatusUndefined indicates an undefined status
BlobUploadStatusUndefined BlobUploadStatus = iota
// BlobUploadStatusPending indicates a pending upload status
BlobUploadStatusPending
// BlobUploadStatusUploaded indicates a successful upload status
BlobUploadStatusUploaded
// BlobUploadStatusFailed indicates a failed upload status
BlobUploadStatusFailed
)
func (s BlobUploadStatus) String() string {
switch s {
case BlobUploadStatusPending:
return "BlobUploadStatusPending"
case BlobUploadStatusUploaded:
return "BlobUploadStatusUploaded"
case BlobUploadStatusFailed:
return "BlobUploadStatusFailed"
default:
return fmt.Sprintf("Unknown BlobUploadStatus (%d)", int32(s))
}
}
// BlobStoragePlatform represents the platform a blob is uploaded to
type BlobStoragePlatform int
const (
// BlobStoragePlatformUndefined indicates an undefined platform
BlobStoragePlatformUndefined BlobStoragePlatform = iota
// BlobStoragePlatformS3 represents AWS S3
BlobStoragePlatformS3
// BlobStoragePlatformArweave represents the Arweave storage blockchain
BlobStoragePlatformArweave
)
func (s BlobStoragePlatform) String() string {
switch s {
case BlobStoragePlatformS3:
return "BlobStoragePlatformS3"
case BlobStoragePlatformArweave:
return "BlobStoragePlatformArweave"
default:
return fmt.Sprintf("Unknown BlobStoragePlatform (%d)", int32(s))
}
}
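
A short illustrative sketch of the string forms the two new enums produce, e.g. for log fields and metric labels (not part of this diff; the import path scroll-tech/common/types is an assumption based on this repo's module layout):

package main

import (
    "fmt"

    "scroll-tech/common/types"
)

func main() {
    // The zero values fall through to the default branch of String().
    fmt.Println(types.BlobUploadStatusUndefined.String())  // "Unknown BlobUploadStatus (0)"
    fmt.Println(types.BlobUploadStatusUploaded.String())   // "BlobUploadStatusUploaded"
    fmt.Println(types.BlobStoragePlatformS3.String())      // "BlobStoragePlatformS3"
    fmt.Println(types.BlobStoragePlatformArweave.String()) // "BlobStoragePlatformArweave"
}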


@@ -173,9 +173,9 @@ func TestSenderType(t *testing.T) {
"SenderTypeL1GasOracle",
},
{
"SenderTypeL2GasOracle",
SenderTypeL2GasOracle,
"SenderTypeL2GasOracle",
"SenderTypeL2GasOracleDeprecated",
SenderTypeL2GasOracleDeprecated,
"SenderTypeL2GasOracleDeprecated",
},
{
"Invalid Value",


@@ -4,12 +4,10 @@ import (
"encoding/json"
"errors"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
)
const (
euclidFork = "euclid"
"github.com/scroll-tech/go-ethereum/common/hexutil"
)
// ProofType represents the type of task.
@@ -39,195 +37,137 @@ const (
ProofTypeBundle
)
// ChunkTaskDetail is a type containing ChunkTask detail.
// ChunkTaskDetail is a type containing the detail of a chunk proving task.
type ChunkTaskDetail struct {
BlockHashes []common.Hash `json:"block_hashes"`
Version uint8 `json:"version"`
// use one of the strings "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
BlockHashes []common.Hash `json:"block_hashes"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
}
// Byte48 is a hex-encoded big integer with a fixed length of 48 bytes
type Byte48 struct {
hexutil.Big
}
func (e Byte48) MarshalText() ([]byte, error) {
i := e.ToInt()
// override the default big-int encoding
if sign := i.Sign(); sign < 0 {
// sanity check
return nil, errors.New("Byte48 must be positive integer")
} else {
s := i.Text(16)
if len(s) > 96 {
return nil, errors.New("integer Exceed 384bit")
}
return []byte(fmt.Sprintf("0x%0*s", 96, s)), nil
}
}
func isString(input []byte) bool {
return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"'
}
// hexutil.Big has a limitation of 256 bits, so we have to override it ...
func (e *Byte48) UnmarshalJSON(input []byte) error {
if !isString(input) {
return errors.New("not hex string")
}
b, err := hexutil.Decode(string(input[1 : len(input)-1]))
if err != nil {
return err
}
if len(b) != 48 {
return fmt.Errorf("not a 48 bytes hex string: %d", len(b))
}
var dec big.Int
dec.SetBytes(b)
*e = Byte48{(hexutil.Big)(dec)}
return nil
}
// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []ChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
KzgProof []byte `json:"kzg_proof"`
KzgCommitment []byte `json:"kzg_commitment"`
Challenge common.Hash `json:"challenge"`
Version uint8 `json:"version"`
// use one of the strings "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
KzgProof *Byte48 `json:"kzg_proof,omitempty"`
KzgCommitment *Byte48 `json:"kzg_commitment,omitempty"`
// ChallengeDigest should be a common.Hash type if it is not nil
ChallengeDigest interface{} `json:"challenge_digest,omitempty"`
}
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
BatchProofs []BatchProof `json:"batch_proofs"`
Version uint8 `json:"version"`
// use one of the strings "euclidv1" / "euclidv2"
ForkName string `json:"fork_name"`
BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
}
type RawBytes []byte
func (r RawBytes) MarshalJSON() ([]byte, error) {
if r == nil {
return []byte("null"), nil
}
// Marshal the []byte as a JSON array of numbers
rn := make([]uint16, len(r))
for i := range r {
rn[i] = uint16(r[i])
}
return json.Marshal(rn)
}
// ChunkInfo is for calculating pi_hash for chunk
type ChunkInfo struct {
ChainID uint64 `json:"chain_id"`
PrevStateRoot common.Hash `json:"prev_state_root"`
PostStateRoot common.Hash `json:"post_state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
DataHash common.Hash `json:"data_hash"`
IsPadding bool `json:"is_padding"`
TxBytes []byte `json:"tx_bytes"`
TxBytesHash common.Hash `json:"tx_data_digest"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
ChainID uint64 `json:"chain_id"`
PrevStateRoot common.Hash `json:"prev_state_root"`
PostStateRoot common.Hash `json:"post_state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
DataHash common.Hash `json:"data_hash"`
IsPadding bool `json:"is_padding"`
// TxBytes []byte `json:"tx_bytes"`
TxBytesHash common.Hash `json:"tx_data_digest"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
TxDataLength uint64 `json:"tx_data_length"`
InitialBlockNumber uint64 `json:"initial_block_number"`
BlockCtxs []BlockContextV2 `json:"block_ctxs"`
PrevBlockhash common.Hash `json:"prev_blockhash"`
PostBlockhash common.Hash `json:"post_blockhash"`
EncryptionKey RawBytes `json:"encryption_key"`
}
// SubCircuitRowUsage tracing info added in v0.11.0rc8
type SubCircuitRowUsage struct {
Name string `json:"name"`
RowNumber uint64 `json:"row_number"`
// BlockContextV2 is the block context for euclid v2
type BlockContextV2 struct {
Timestamp uint64 `json:"timestamp"`
BaseFee hexutil.Big `json:"base_fee"`
GasLimit uint64 `json:"gas_limit"`
NumTxs uint16 `json:"num_txs"`
NumL1Msgs uint16 `json:"num_l1_msgs"`
}
// ChunkProof
type ChunkProof interface {
Proof() []byte
}
// NewChunkProof creates a new ChunkProof instance.
func NewChunkProof(hardForkName string) ChunkProof {
switch hardForkName {
case euclidFork:
return &OpenVMChunkProof{}
default:
return &Halo2ChunkProof{}
}
}
// Halo2ChunkProof includes the proof info that are required for chunk verification and rollup.
type Halo2ChunkProof struct {
StorageTrace []byte `json:"storage_trace,omitempty"`
Protocol []byte `json:"protocol"`
RawProof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between cooridinator computation and prover compution
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
GitVersion string `json:"git_version,omitempty"`
RowUsages []SubCircuitRowUsage `json:"row_usages,omitempty"`
}
// Proof returns the proof bytes of a ChunkProof
func (ap *Halo2ChunkProof) Proof() []byte {
return ap.RawProof
}
// BatchProof
type BatchProof interface {
SanityCheck() error
Proof() []byte
}
// NewBatchProof creates a new BatchProof instance.
func NewBatchProof(hardForkName string) BatchProof {
switch hardForkName {
case euclidFork:
return &OpenVMBatchProof{}
default:
return &Halo2BatchProof{}
}
}
// Halo2BatchProof includes the proof info that are required for batch verification and rollup.
type Halo2BatchProof struct {
Protocol []byte `json:"protocol"`
RawProof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between cooridinator computation and prover compution
BatchHash common.Hash `json:"batch_hash"`
GitVersion string `json:"git_version,omitempty"`
}
// Proof returns the proof bytes of a BatchProof
func (ap *Halo2BatchProof) Proof() []byte {
return ap.RawProof
}
// SanityCheck checks whether a BatchProof is in a legal format
func (ap *Halo2BatchProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if len(ap.RawProof) == 0 {
return errors.New("proof not ready")
}
if len(ap.RawProof)%32 != 0 {
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
}
if len(ap.Instances) == 0 {
return errors.New("instance not ready")
}
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
}
return nil
}
// BundleProof
type BundleProof interface {
SanityCheck() error
Proof() []byte
}
// NewBundleProof creates a new BundleProof instance.
func NewBundleProof(hardForkName string) BundleProof {
switch hardForkName {
case euclidFork:
return &OpenVMBundleProof{}
default:
return &Halo2BundleProof{}
}
}
// BundleProof includes the proof info that are required for verification of a bundle of batch proofs.
type Halo2BundleProof struct {
RawProof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between cooridinator computation and prover compution
GitVersion string `json:"git_version,omitempty"`
}
// Proof returns the proof bytes of a BundleProof
func (ap *Halo2BundleProof) Proof() []byte {
return ap.RawProof
}
// SanityCheck checks whether a BundleProof is in a legal format
func (ap *Halo2BundleProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if len(ap.RawProof) == 0 {
return errors.New("proof not ready")
}
if len(ap.RawProof)%32 != 0 {
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
}
if len(ap.Instances) == 0 {
return errors.New("instance not ready")
}
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
}
return nil
// OpenVMProofStat is the metric data carried with an OpenVMProof
type OpenVMProofStat struct {
TotalCycle uint64 `json:"total_cycles"`
ExecutionTimeMills uint64 `json:"execution_time_mills"`
ProvingTimeMills uint64 `json:"proving_time_mills"`
}
// Proof for flatten VM proof
type OpenVMProof struct {
Proof []byte `json:"proofs"`
PublicValues []byte `json:"public_values"`
Proof []byte `json:"proofs"`
PublicValues []byte `json:"public_values"`
Stat *OpenVMProofStat `json:"stat,omitempty"`
}
// Proof for flatten EVM proof
@@ -239,7 +179,8 @@ type OpenVMEvmProof struct {
// OpenVMChunkProof includes the proof info that are required for chunk verification and rollup.
type OpenVMChunkProof struct {
MetaData struct {
ChunkInfo *ChunkInfo `json:"chunk_info"`
ChunkInfo *ChunkInfo `json:"chunk_info"`
TotalGasUsed uint64 `json:"chunk_total_gas"`
} `json:"metadata"`
VmProof *OpenVMProof `json:"proof"`
@@ -258,12 +199,15 @@ func (p *OpenVMChunkProof) Proof() []byte {
// OpenVMBatchInfo is for calculating pi_hash for batch header
type OpenVMBatchInfo struct {
ParentBatchHash common.Hash `json:"parent_batch_hash"`
ParentStateRoot common.Hash `json:"parent_state_root"`
StateRoot common.Hash `json:"state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
BatchHash common.Hash `json:"batch_hash"`
ChainID uint64 `json:"chain_id"`
ParentBatchHash common.Hash `json:"parent_batch_hash"`
ParentStateRoot common.Hash `json:"parent_state_root"`
StateRoot common.Hash `json:"state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
BatchHash common.Hash `json:"batch_hash"`
ChainID uint64 `json:"chain_id"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
EncryptionKey RawBytes `json:"encryption_key"`
}
// BatchProof includes the proof info that are required for batch verification and rollup.
@@ -323,6 +267,8 @@ type OpenVMBundleInfo struct {
NumBatches uint32 `json:"num_batches"`
PrevBatchHash common.Hash `json:"prev_batch_hash"`
BatchHash common.Hash `json:"batch_hash"`
MsgQueueHash common.Hash `json:"msg_queue_hash"`
EncryptionKey RawBytes `json:"encryption_key"`
}
// OpenVMBundleProof includes the proof info that are required for verification of a bundle of batch proofs.
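
The structs above define the coordinator-to-prover wire format, so a minimal sketch of what they serialize to may help (not part of this diff; hash and version values are placeholders, and the import path scroll-tech/common/types/message matches the imports used elsewhere in this diff). It marshals a ChunkTaskDetail and shows the fixed-width encoding Byte48 produces for fields such as kzg_commitment:

package main

import (
    "encoding/json"
    "fmt"
    "math/big"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"

    "scroll-tech/common/types/message"
)

func main() {
    // Placeholder chunk task; unset message-queue hashes marshal as zero hashes.
    task := message.ChunkTaskDetail{
        Version:     1, // hypothetical codec version
        ForkName:    "euclidv2",
        BlockHashes: []common.Hash{common.HexToHash("0x01")},
    }
    out, err := json.Marshal(&task)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))

    // Byte48 left-pads its value, so MarshalText always yields "0x" plus 96 hex characters.
    c := message.Byte48{Big: hexutil.Big(*big.NewInt(1))}
    txt, _ := c.MarshalText()
    fmt.Println(len(txt), string(txt)) // 98 0x00...01
}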


@@ -0,0 +1,22 @@
package message
import (
"fmt"
"testing"
)
func TestBytes48(t *testing.T) {
ti := &Byte48{}
ti.UnmarshalText([]byte("0x1"))
if s, err := ti.MarshalText(); err == nil {
if len(s) != 98 {
panic(fmt.Sprintf("wrong str: %s", s))
}
}
ti.UnmarshalText([]byte("0x0"))
if s, err := ti.MarshalText(); err == nil {
if len(s) != 98 {
panic(fmt.Sprintf("wrong str: %s", s))
}
}
}

common/utils/blob.go Normal file

@@ -0,0 +1,23 @@
package utils
import (
"crypto/sha256"
"fmt"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)
// CalculateVersionedBlobHash calculates the kzg4844 versioned blob hash from a blob
func CalculateVersionedBlobHash(blob kzg4844.Blob) ([32]byte, error) {
// calculate kzg4844 commitment from blob
commit, err := kzg4844.BlobToCommitment(&blob)
if err != nil {
return [32]byte{}, fmt.Errorf("failed to get blob commitment, err: %w", err)
}
// calculate kzg4844 versioned blob hash from blob commitment
hasher := sha256.New()
vh := kzg4844.CalcBlobHashV1(hasher, &commit)
return vh, nil
}
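
A short usage sketch for the helper above (not part of this diff; the import path scroll-tech/common/utils is an assumption based on this repo's module layout). An all-zero blob is a valid KZG input, and per EIP-4844 the resulting versioned hash begins with the version byte 0x01:

package main

import (
    "encoding/hex"
    "fmt"

    "github.com/scroll-tech/go-ethereum/crypto/kzg4844"

    "scroll-tech/common/utils"
)

func main() {
    // Real callers pass the blob proposed for a batch; a zero blob keeps the sketch self-contained.
    var blob kzg4844.Blob
    vh, err := utils.CalculateVersionedBlobHash(blob)
    if err != nil {
        panic(err)
    }
    fmt.Println("versioned blob hash:", hex.EncodeToString(vh[:]))
}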

common/utils/blob_test.go Normal file

@@ -0,0 +1,51 @@
package utils
import (
"encoding/hex"
"encoding/json"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)
type BlobData struct {
VersionedBlobHash string `json:"versionedBlobHash"`
BlobData string `json:"blobData"`
}
// TestCalculateVersionedBlobHash tests the CalculateVersionedBlobHash function
func TestCalculateVersionedBlobHash(t *testing.T) {
// Read the test data
data, err := os.ReadFile("../testdata/blobdata.json")
if err != nil {
t.Fatalf("Failed to read blobdata.json: %v", err)
}
var blobData BlobData
if err := json.Unmarshal(data, &blobData); err != nil {
t.Fatalf("Failed to parse blobdata.json: %v", err)
}
blobBytes, err := hex.DecodeString(blobData.BlobData)
if err != nil {
t.Fatalf("Failed to decode blob data: %v", err)
}
// Convert []byte to kzg4844.Blob
var blob kzg4844.Blob
copy(blob[:], blobBytes)
// Calculate the hash
calculatedHashBytes, err := CalculateVersionedBlobHash(blob)
if err != nil {
t.Fatalf("Failed to calculate versioned blob hash: %v", err)
}
calculatedHash := hex.EncodeToString(calculatedHashBytes[:])
if calculatedHash != blobData.VersionedBlobHash {
t.Fatalf("Hash mismatch: got %s, want %s", calculatedHash, blobData.VersionedBlobHash)
}
}


@@ -20,9 +20,12 @@ var (
}
// RollupRelayerFlags contains flags only used in rollup-relayer
RollupRelayerFlags = []cli.Flag{
&ImportGenesisFlag,
&MinCodecVersionFlag,
}
// ProposerToolFlags contains flags only used in proposer tool
ProposerToolFlags = []cli.Flag{
&StartL2BlockFlag,
}
// ConfigFileFlag load json type config file.
ConfigFileFlag = cli.StringFlag{
Name: "config",
@@ -73,12 +76,6 @@ var (
Category: "METRICS",
Value: 6060,
}
// ImportGenesisFlag import genesis batch during startup
ImportGenesisFlag = cli.BoolFlag{
Name: "import-genesis",
Usage: "Import genesis batch into L1 contract during startup",
Value: false,
}
// ServicePortFlag is the port the service will listen on
ServicePortFlag = cli.IntFlag{
Name: "service.port",
@@ -97,4 +94,10 @@ var (
Usage: "Minimum required codec version for the chunk/batch/bundle proposers",
Required: true,
}
// StartL2BlockFlag indicates the start L2 block number for proposer tool
StartL2BlockFlag = cli.Uint64Flag{
Name: "start-l2-block",
Usage: "Start L2 block number for proposer tool",
Value: 0,
}
)


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.4.98"
var tag = "v4.7.10"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -23,7 +23,7 @@ var commit = func() string {
return "000000"
}()
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover and halo2, contacted by a "-"
// ZkVersion is the commit-id of cargo.lock/zkvm-prover and openvm, concatenated by a "-"
// The default `000000-000000` is set for integration test, and will be overwritten by coordinator's & prover's actual compilations (see their Makefiles).
var ZkVersion = "000000-000000"


@@ -1,3 +1,4 @@
/build/bin
.idea
internal/logic/verifier/lib
internal/libzkp/lib/libzkp.so


@@ -1,26 +1,31 @@
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator libzkp
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
LIBZKP_PATH=./internal/logic/libzkp/lib/libzkp.so
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
ZKVM_VERSION=$(shell grep -m 1 "zkvm-prover?" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
OPENVM_VERSION=$(shell grep -m 1 "openvm.git" ../Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
ZK_VERSION=${ZKVM_VERSION}-${OPENVM_VERSION}
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
libzkp:
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
$(LIBZKP_PATH):
$(MAKE) -C ./internal/logic/libzkp build
coordinator_api: libzkp ## Builds the Coordinator api instance.
clean_libzkp:
$(MAKE) -C ./internal/logic/libzkp clean
libzkp: clean_libzkp $(LIBZKP_PATH)
coordinator_api: $(LIBZKP_PATH) ## Builds the Coordinator api instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
coordinator_cron:
@@ -29,8 +34,21 @@ coordinator_cron:
coordinator_tool:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool
coordinator_api_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
localsetup: coordinator_api ## Local setup: build coordinator_api, copy config, and setup releases
mkdir -p build/bin/conf
@echo "Copying configuration files..."
@if [ -f "$(PWD)/conf/config.template.json" ]; then \
SRC="$(PWD)/conf/config.template.json"; \
else \
SRC="$(CURDIR)/conf/config.json"; \
fi; \
cp -fL "$$SRC" "$(CURDIR)/build/bin/conf/config.template.json"
@echo "Setting up releases..."
cd $(CURDIR)/build && bash setup_releases.sh
#coordinator_api_skip_libzkp:
# go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
mock_coordinator_api: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_api ./cmd/api
@@ -38,15 +56,15 @@ mock_coordinator_api: ## Builds the mocked Coordinator instance.
mock_coordinator_cron: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
test-verifier: libzkp
test-verifier: $(LIBZKP_PATH)
go test -tags ffi -timeout 0 -v ./internal/logic/verifier
test-gpu-verifier: libzkp
test-gpu-verifier: $(LIBZKP_PATH)
go test -tags="gpu ffi" -timeout 0 -v ./internal/logic/verifier
lint: ## Lint the files - used for CI
cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
GOBIN=$(PWD)/build/bin go run ../build/lint.go
cd ../ && cargo fmt --all -- --check && cargo clippy --release -- -D warnings
clean: ## Empty out the bin folder
@rm -rf build/bin


@@ -0,0 +1,72 @@
#!/bin/bash
# release version
if [ -z "${SCROLL_ZKVM_VERSION}" ]; then
echo "SCROLL_ZKVM_VERSION not set"
exit 1
fi
# default fork name from env, falling back to "galileov2"
SCROLL_FORK_NAME="${SCROLL_FORK_NAME:-galileov2}"
# set ASSET_DIR by reading from config.json
CONFIG_FILE="bin/conf/config.template.json"
if [ ! -f "$CONFIG_FILE" ]; then
echo "Config file $CONFIG_FILE not found"
exit 1
fi
# get the number of verifiers in the array
VERIFIER_COUNT=$(jq -r '.prover_manager.verifier.verifiers | length' "$CONFIG_FILE")
if [ "$VERIFIER_COUNT" = "null" ] || [ "$VERIFIER_COUNT" -eq 0 ]; then
echo "No verifiers found in config file"
exit 1
fi
echo "Found $VERIFIER_COUNT verifier(s) in config"
# iterate through each verifier entry
for ((i=0; i<$VERIFIER_COUNT; i++)); do
# extract assets_path for current verifier
ASSETS_PATH=$(jq -r ".prover_manager.verifier.verifiers[$i].assets_path" "$CONFIG_FILE")
FORK_NAME=$(jq -r ".prover_manager.verifier.verifiers[$i].fork_name" "$CONFIG_FILE")
# skip if this verifier's fork doesn't match the target fork
if [ "$FORK_NAME" != "$SCROLL_FORK_NAME" ]; then
echo "Expect $SCROLL_FORK_NAME, skip current fork ($FORK_NAME)"
continue
fi
if [ "$ASSETS_PATH" = "null" ]; then
echo "Warning: Could not find assets_path for verifier $i, skipping..."
continue
fi
echo "Processing verifier $i ($FORK_NAME): assets_path=$ASSETS_PATH"
# check if it's an absolute path (starts with /)
if [[ "$ASSETS_PATH" = /* ]]; then
# absolute path, use as is
ASSET_DIR="$ASSETS_PATH"
else
# relative path, prefix with "bin/"
ASSET_DIR="bin/$ASSETS_PATH"
fi
echo "Using ASSET_DIR: $ASSET_DIR"
# create directory if it doesn't exist
mkdir -p "$ASSET_DIR"
# assets for verifier-only mode
echo "Downloading assets for $FORK_NAME to $ASSET_DIR..."
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/verifier.bin -O ${ASSET_DIR}/verifier.bin
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/root_verifier_vk -O ${ASSET_DIR}/root_verifier_vk
wget https://circuit-release.s3.us-west-2.amazonaws.com/scroll-zkvm/releases/$SCROLL_ZKVM_VERSION/verifier/openVmVk.json -O ${ASSET_DIR}/openVmVk.json
echo "Completed downloading assets for $FORK_NAME"
echo "---"
done
echo "All verifier assets downloaded successfully"


@@ -90,18 +90,12 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
cfg.ProverManager = &coordinatorConfig.ProverManager{
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{
MockMode: true,
LowVersionCircuit: &coordinatorConfig.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "darwin",
MinProverVersion: "v4.2.0",
},
HighVersionCircuit: &coordinatorConfig.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "darwinV2",
MinProverVersion: "v4.3.0",
MinProverVersion: "v4.4.89",
Verifiers: []coordinatorConfig.AssetConfig{
{
AssetsPath: "",
ForkName: "galileo",
},
},
},
BatchCollectionTimeSec: 60,


@@ -19,6 +19,7 @@ import (
)
var app *cli.App
var cfg *config.Config
func init() {
// Set up coordinator app info.
@@ -29,16 +30,29 @@ func init() {
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
if err := utils.LogSetup(ctx); err != nil {
return err
}
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
var err error
cfg, err = config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
return nil
}
// sub commands
app.Commands = []*cli.Command{
{
Name: "verify",
Usage: "verify an proof, specified by [forkname] <type> <proof path>",
Action: verify,
},
}
}
func action(ctx *cli.Context) error {
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)
@@ -62,14 +76,14 @@ func action(ctx *cli.Context) error {
return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID)
}
var batchProofs []message.BatchProof
var batchProofs []*message.OpenVMBatchProof
for _, batch := range batches {
proof := message.NewBatchProof("darwinV2")
var proof message.OpenVMBatchProof
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
log.Error("failed to unmarshal batch proof")
return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash)
}
batchProofs = append(batchProofs, proof)
batchProofs = append(batchProofs, &proof)
}
taskDetail := message.BundleTaskDetail{


@@ -0,0 +1,109 @@
package main
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
)
func verify(cCtx *cli.Context) error {
var forkName, proofType, proofPath string
if cCtx.Args().Len() <= 2 {
forkName = cfg.ProverManager.Verifier.Verifiers[0].ForkName
proofType = cCtx.Args().First()
proofPath = cCtx.Args().Get(1)
} else {
forkName = cCtx.Args().First()
proofType = cCtx.Args().Get(1)
proofPath = cCtx.Args().Get(2)
}
log.Info("verify proof", "in", proofPath, "type", proofType, "forkName", forkName)
// Load the content of the proof file
data, err := os.ReadFile(filepath.Clean(proofPath))
if err != nil {
return fmt.Errorf("error reading file: %w", err)
}
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier, cfg.L2.ValidiumMode)
if err != nil {
return err
}
var ret bool
switch strings.ToLower(proofType) {
case "chunk":
proof := &message.OpenVMChunkProof{}
if err := json.Unmarshal(data, proof); err != nil {
return err
}
vk, ok := vf.ChunkVk[forkName]
if !ok {
return fmt.Errorf("no vk loaded for fork %s", forkName)
}
if len(proof.Vk) != 0 {
if !bytes.Equal(proof.Vk, vk) {
return fmt.Errorf("unmatch vk with expected: expected %s, get %s",
base64.StdEncoding.EncodeToString(vk),
base64.StdEncoding.EncodeToString(proof.Vk),
)
}
} else {
proof.Vk = vk
}
ret, err = vf.VerifyChunkProof(proof, forkName)
case "batch":
proof := &message.OpenVMBatchProof{}
if err := json.Unmarshal(data, proof); err != nil {
return err
}
vk, ok := vf.BatchVk[forkName]
if !ok {
return fmt.Errorf("no vk loaded for fork %s", forkName)
}
if len(proof.Vk) != 0 {
if !bytes.Equal(proof.Vk, vk) {
return fmt.Errorf("unmatch vk with expected: expected %s, get %s",
base64.StdEncoding.EncodeToString(vk),
base64.StdEncoding.EncodeToString(proof.Vk),
)
}
} else {
proof.Vk = vk
}
ret, err = vf.VerifyBatchProof(proof, forkName)
case "bundle":
proof := &message.OpenVMBundleProof{}
if err := json.Unmarshal(data, proof); err != nil {
return err
}
vk, ok := vf.BundleVk[forkName]
if !ok {
return fmt.Errorf("no vk loaded for fork %s", forkName)
}
proof.Vk = vk
ret, err = vf.VerifyBundleProof(proof, forkName)
default:
return fmt.Errorf("unsupport proof type %s", proofType)
}
if err != nil {
return err
}
log.Info("verified:", "ret", ret)
return nil
}


@@ -7,19 +7,22 @@
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"mock_mode": true,
"low_version_circuit": {
"params_path": "params",
"assets_path": "assets",
"fork_name": "darwin",
"min_prover_version": "v4.4.43"
},
"high_version_circuit": {
"params_path": "params",
"assets_path": "assets",
"fork_name": "darwinV2",
"min_prover_version": "v4.4.45"
}
"min_prover_version": "v4.4.45",
"verifiers": [
{
"features": "legacy_witness:openvm_13",
"assets_path": "assets_feynman",
"fork_name": "feynman"
},
{
"assets_path": "assets",
"fork_name": "galileo"
},
{
"assets_path": "assets_v2",
"fork_name": "galileoV2"
}
]
}
},
"db": {
@@ -29,7 +32,10 @@
"maxIdleNum": 20
},
"l2": {
"chain_id": 111
"chain_id": 111,
"l2geth": {
"endpoint": "not need to specified for mocking"
}
},
"auth": {
"secret": "prover secret key",


@@ -2,8 +2,6 @@ module scroll-tech/coordinator
go 1.22
toolchain go1.22.2
require (
github.com/appleboy/gin-jwt/v2 v2.9.1
github.com/gin-gonic/gin v1.9.1
@@ -11,8 +9,8 @@ require (
github.com/google/uuid v1.6.0
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/scroll-tech/da-codec v0.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f
github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.10.0
github.com/urfave/cli/v2 v2.25.7
@@ -48,6 +46,7 @@ require (
)
require (
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
@@ -55,30 +54,59 @@ require (
github.com/consensys/bavard v0.1.29 // indirect
github.com/consensys/gnark-crypto v0.16.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844/v2 v2.1.5 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/huin/goupnp v1.0.2 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
github.com/supranational/blst v0.3.15 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)


@@ -1,12 +1,18 @@
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/appleboy/gin-jwt/v2 v2.9.1 h1:l29et8iLW6omcHltsOP6LLk4s3v4g2FbFs0koxGWVZs=
github.com/appleboy/gin-jwt/v2 v2.9.1/go.mod h1:jwcPZJ92uoC9nOUTOKWoN/f6JZOgMSKlFSHw5/FrRUk=
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
@@ -24,6 +30,9 @@ github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1
github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
github.com/bytedance/sonic v1.10.1 h1:7a1wuFXL1cMy7a3f7/VFcEtriuXQnUBhtoVfOZiaysc=
github.com/bytedance/sonic v1.10.1/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
@@ -38,23 +47,39 @@ github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/crate-crypto/go-eth-kzg v1.4.0 h1:WzDGjHk4gFg6YzV0rJOAsTK4z3Qkz5jd4RE3DAvPFkg=
github.com/crate-crypto/go-eth-kzg v1.4.0/go.mod h1:J9/u5sWfznSObptgfa92Jq8rTswn6ahQWEuiLHOjCUI=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s=
github.com/ethereum/c-kzg-4844/v2 v2.1.5/go.mod h1:u59hRTTah4Co6i9fDWtiCjTrblJv0UwsqZKCc0GfgUs=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
@@ -73,19 +98,31 @@ github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7N
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -93,13 +130,24 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
@@ -115,6 +163,7 @@ github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@@ -129,14 +178,22 @@ github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
@@ -145,51 +202,76 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/da-codec v0.10.0 h1:IPHxyTyXTWPV0Q+DZ08cod2fWkhUvrfysmj/VBpB+WU=
github.com/scroll-tech/da-codec v0.10.0/go.mod h1:MBlIP4wCXPcUDZ/Ci2B7n/2IbVU1WBo9OTFTZ5ffE0U=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f h1:j6SjP98MoWFFX9TwB1/nFYEkayqHQsrtE66Ll2C+oT0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20251128092113-8629f088d78f/go.mod h1:Aa/kD1XB+OV/7rRxMQrjcPCB4b0pKyLH0gsTrtuHi38=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -200,8 +282,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.15 h1:rd9viN6tfARE5wv3KZJ9H8e1cg0jXW8syFCcsbHa76o=
github.com/supranational/blst v0.3.15/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
@@ -216,6 +298,8 @@ github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPD
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
@@ -227,11 +311,16 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
@@ -240,7 +329,10 @@ golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -250,13 +342,25 @@ golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -269,36 +373,56 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=


@@ -28,10 +28,17 @@ type ProverManager struct {
BundleCollectionTimeSec int `json:"bundle_collection_time_sec"`
}
// L2Endpoint holds the l2geth client configuration items.
type L2Endpoint struct {
Url string `json:"endpoint"`
}
// L2 loads l2geth configuration items.
type L2 struct {
// l2geth chain_id.
ChainID uint64 `json:"chain_id"`
ChainID uint64 `json:"chain_id"`
Endpoint *L2Endpoint `json:"l2geth"`
ValidiumMode bool `json:"validium_mode"`
}
// Auth provides the auth coordinator
@@ -41,27 +48,34 @@ type Auth struct {
LoginExpireDurationSec int `json:"login_expire_duration_sec"`
}
// Sequencer holds the sequencer-controlled data.
type Sequencer struct {
DecryptionKey string `json:"decryption_key"`
}
// Config load configuration items.
type Config struct {
ProverManager *ProverManager `json:"prover_manager"`
DB *database.Config `json:"db"`
L2 *L2 `json:"l2"`
Auth *Auth `json:"auth"`
Sequencer *Sequencer `json:"sequencer"`
}
// CircuitConfig circuit items.
type CircuitConfig struct {
ParamsPath string `json:"params_path"`
// AssetConfig contains the assets configured for each fork; the default vk file name is "OpenVmVk.json".
type AssetConfig struct {
AssetsPath string `json:"assets_path"`
Version uint8 `json:"version,omitempty"`
ForkName string `json:"fork_name"`
MinProverVersion string `json:"min_prover_version"`
Vkfile string `json:"vk_file,omitempty"`
MinProverVersion string `json:"min_prover_version,omitempty"`
Features string `json:"features,omitempty"`
}
// VerifierConfig load zk verifier config.
type VerifierConfig struct {
MockMode bool `json:"mock_mode"`
LowVersionCircuit *CircuitConfig `json:"low_version_circuit"`
HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
MinProverVersion string `json:"min_prover_version"`
Verifiers []AssetConfig `json:"verifiers"`
}
// NewConfig returns a new instance of Config.
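
A minimal sketch of how the new per-fork verifier configuration decodes; the fork names, paths, and version strings below are hypothetical, and the local structs simply mirror the AssetConfig/VerifierConfig fields above.

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the coordinator's AssetConfig and VerifierConfig (sketch only).
type assetConfig struct {
	AssetsPath       string `json:"assets_path"`
	ForkName         string `json:"fork_name"`
	MinProverVersion string `json:"min_prover_version,omitempty"`
}

type verifierConfig struct {
	MockMode         bool          `json:"mock_mode"`
	MinProverVersion string        `json:"min_prover_version"`
	Verifiers        []assetConfig `json:"verifiers"`
}

func main() {
	// Hypothetical config: one asset entry per supported fork.
	raw := `{
	  "mock_mode": false,
	  "min_prover_version": "v4.4.45",
	  "verifiers": [
	    {"assets_path": "assets/euclid", "fork_name": "euclid", "min_prover_version": "v4.4.45"},
	    {"assets_path": "assets/galileo", "fork_name": "galileo", "min_prover_version": "v4.5.0"}
	  ]
	}`

	var cfg verifierConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	for _, v := range cfg.Verifiers {
		fmt.Printf("fork=%s assets=%s min_prover_version=%s\n", v.ForkName, v.AssetsPath, v.MinProverVersion)
	}
}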


@@ -15,15 +15,18 @@ func TestConfig(t *testing.T) {
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 5,
"external_prover_threshold": 32,
"bundle_collection_time_sec": 180,
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"mock_mode": true,
"params_path": "",
"agg_vk_path": ""
"min_prover_version": "v4.4.45",
"verifiers": [{
"assets_path": "assets",
"fork_name": "galileo"
}]
},
"max_verifier_workers": 4,
"min_prover_version": "v1.0.0"
"max_verifier_workers": 4
},
"db": {
"driver_name": "postgres",
@@ -32,13 +35,17 @@ func TestConfig(t *testing.T) {
"maxIdleNum": 20
},
"l2": {
"chain_id": 111
"chain_id": 111,
"validium_mode": false
},
"auth": {
"secret": "prover secret key",
"challenge_expire_duration_sec": 3600,
"login_expire_duration_sec": 3600
}
},
"sequencer": {
"decryption_key": "sequencer decryption key"
}
}`
t.Run("Success Case", func(t *testing.T) {


@@ -1,12 +1,15 @@
package api
import (
"encoding/json"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/logic/verifier"
)
@@ -21,14 +24,27 @@ var (
// InitController inits Controller with database
func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) {
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
validiumMode := cfg.L2.ValidiumMode
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier, validiumMode)
if err != nil {
panic("proof receiver new verifier failure")
}
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap, "bundleVerifier", vf.BundleVkMap, "openVmVerifier", vf.OpenVMVkMap)
log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)
// TODO: enable this when the libzkp has been updated
l2cfg := cfg.L2.Endpoint
if l2cfg == nil {
panic("l2geth is not specified")
}
l2cfgBytes, err := json.Marshal(l2cfg)
if err != nil {
panic(err)
}
libzkp.InitL2geth(string(l2cfgBytes))
Auth = NewAuthController(db, cfg, vf)
GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, chainCfg, db, vf, reg)
}
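
The payload handed to libzkp.InitL2geth above is just the marshalled L2Endpoint; a minimal sketch of the resulting JSON, with a hypothetical URL and a local mirror of config.L2Endpoint:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of config.L2Endpoint (sketch only).
type l2Endpoint struct {
	Url string `json:"endpoint"`
}

func main() {
	// Hypothetical l2geth URL; the real value comes from the coordinator config file.
	cfg := l2Endpoint{Url: "http://localhost:8545"}
	b, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"endpoint":"http://localhost:8545"}
}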


@@ -17,6 +17,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -25,13 +26,15 @@ type GetTaskController struct {
proverTasks map[message.ProofType]provertask.ProverTask
getTaskAccessCounter *prometheus.CounterVec
l2syncer *l2Syncer
}
// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, reg)
bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, reg)
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, verifier *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, verifier.ChunkVk, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, verifier.BatchVk, reg)
bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, verifier.BundleVk, reg)
ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
@@ -44,6 +47,13 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
ptc.proverTasks[message.ProofTypeBatch] = batchProverTask
ptc.proverTasks[message.ProofTypeBundle] = bundleProverTask
if syncer, err := createL2Syncer(cfg); err != nil {
log.Crit("can not init l2 syncer", "err", err)
} else {
ptc.l2syncer = syncer
}
return ptc
}
@@ -78,6 +88,17 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
return
}
if getTaskParameter.ProverHeight == 0 {
// backfill the prover height from the internal l2geth node
if blk, err := ptc.l2syncer.getLatestBlockNumber(ctx); err == nil {
getTaskParameter.ProverHeight = blk
} else {
nerr := fmt.Errorf("inner l2geth failure, err:%w", err)
types.RenderFailure(ctx, types.InternalServerError, nerr)
return
}
}
proofType := ptc.proofType(&getTaskParameter)
proverTask, isExist := ptc.proverTasks[proofType]
if !isExist {


@@ -0,0 +1,71 @@
//go:build !mock_verifier
package api
import (
"errors"
"fmt"
"sync"
"time"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/internal/config"
)
type l2Syncer struct {
l2gethClient *ethclient.Client
lastBlockNumber struct {
sync.RWMutex
data uint64
t time.Time
}
}
func createL2Syncer(cfg *config.Config) (*l2Syncer, error) {
if cfg.L2 == nil || cfg.L2.Endpoint == nil {
return nil, fmt.Errorf("l2 endpoint is not set in config")
} else {
l2gethClient, err := ethclient.Dial(cfg.L2.Endpoint.Url)
if err != nil {
return nil, fmt.Errorf("dial l2geth endpoint fail, err: %s", err)
}
return &l2Syncer{
l2gethClient: l2gethClient,
}, nil
}
}
// getLatestBlockNumber gets the latest block number, using cache if available and not expired
func (syncer *l2Syncer) getLatestBlockNumber(ctx *gin.Context) (uint64, error) {
// First check if we have a cached value that's still valid
syncer.lastBlockNumber.RLock()
if !syncer.lastBlockNumber.t.IsZero() && time.Since(syncer.lastBlockNumber.t) < time.Second*10 {
blockNumber := syncer.lastBlockNumber.data
syncer.lastBlockNumber.RUnlock()
return blockNumber, nil
}
syncer.lastBlockNumber.RUnlock()
// If not cached or expired, fetch from the client
if syncer.l2gethClient == nil {
return 0, errors.New("L2 geth client not initialized")
}
blockNumber, err := syncer.l2gethClient.BlockNumber(ctx)
if err != nil {
return 0, fmt.Errorf("failed to get latest block number: %w", err)
}
// Update the cache
syncer.lastBlockNumber.Lock()
syncer.lastBlockNumber.data = blockNumber
syncer.lastBlockNumber.t = time.Now()
syncer.lastBlockNumber.Unlock()
log.Debug("updated block height reference", "height", blockNumber)
return blockNumber, nil
}
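
The locking here follows a common read-through cache pattern: serve a fresh value under the read lock, and only take the write lock when refreshing. A self-contained sketch of the same pattern with a pluggable fetcher (the names are illustrative, not coordinator code):

package main

import (
	"fmt"
	"sync"
	"time"
)

// cachedValue is an illustrative read-through cache with a TTL,
// mirroring the locking discipline used by l2Syncer above.
type cachedValue struct {
	mu   sync.RWMutex
	data uint64
	t    time.Time
	ttl  time.Duration
	// fetch is called when the cached value is missing or expired.
	fetch func() (uint64, error)
}

func (c *cachedValue) get() (uint64, error) {
	// Serve from the cache while it is still fresh.
	c.mu.RLock()
	if !c.t.IsZero() && time.Since(c.t) < c.ttl {
		v := c.data
		c.mu.RUnlock()
		return v, nil
	}
	c.mu.RUnlock()

	// Refresh outside the read lock, then update under the write lock.
	v, err := c.fetch()
	if err != nil {
		return 0, err
	}
	c.mu.Lock()
	c.data = v
	c.t = time.Now()
	c.mu.Unlock()
	return v, nil
}

func main() {
	calls := 0
	c := &cachedValue{
		ttl: 10 * time.Second,
		fetch: func() (uint64, error) {
			calls++
			return 12345678, nil
		},
	}
	v1, _ := c.get()
	v2, _ := c.get() // served from the cache, fetch is not called again
	fmt.Println(v1, v2, "fetch calls:", calls)
}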


@@ -0,0 +1,20 @@
//go:build mock_verifier
package api
import (
"scroll-tech/coordinator/internal/config"
"github.com/gin-gonic/gin"
)
type l2Syncer struct{}
func createL2Syncer(_ *config.Config) (*l2Syncer, error) {
return &l2Syncer{}, nil
}
// getLatestBlockNumber returns a fixed block number in mock mode
func (syncer *l2Syncer) getLatestBlockNumber(_ *gin.Context) (uint64, error) {
return 99999994, nil
}


@@ -21,39 +21,22 @@ import (
type LoginLogic struct {
cfg *config.Config
challengeOrm *orm.Challenge
chunkVks map[string]struct{}
batchVKs map[string]struct{}
bundleVks map[string]struct{}
openVmVks map[string]struct{}
proverVersionHardForkMap map[string][]string
proverVersionHardForkMap map[string]string
}
// NewLoginLogic new a LoginLogic
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
proverVersionHardForkMap := make(map[string][]string)
if version.CheckScrollRepoVersion(cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
log.Error("config file error, low verifier min_prover_version should not more than high verifier min_prover_version",
"low verifier min_prover_version", cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion,
"high verifier min_prover_version", cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion)
panic("verifier config file error")
}
proverVersionHardForkMap := make(map[string]string)
var highHardForks []string
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
if cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != "euclid" {
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.LowVersionCircuit.ForkName)
for _, cfg := range cfg.ProverManager.Verifier.Verifiers {
proverVersionHardForkMap[cfg.ForkName] = cfg.MinProverVersion
}
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
proverVersionHardForkMap[cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion] = []string{cfg.ProverManager.Verifier.LowVersionCircuit.ForkName}
return &LoginLogic{
cfg: cfg,
chunkVks: vf.ChunkVKMap,
batchVKs: vf.BatchVKMap,
bundleVks: vf.BundleVkMap,
openVmVks: vf.OpenVMVkMap,
challengeOrm: orm.NewChallenge(db),
proverVersionHardForkMap: proverVersionHardForkMap,
@@ -73,46 +56,25 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
return errors.New("auth message verify failure")
}
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, login.Message.ProverVersion)
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.MinProverVersion, login.Message.ProverVersion)
}
if len(login.Message.ProverTypes) > 0 {
vks := make(map[string]struct{})
for _, proverType := range login.Message.ProverTypes {
switch proverType {
case types.ProverTypeChunk:
for vk := range l.chunkVks {
vks[vk] = struct{}{}
}
case types.ProverTypeBatch:
for vk := range l.batchVKs {
vks[vk] = struct{}{}
}
for vk := range l.bundleVks {
vks[vk] = struct{}{}
}
case types.ProverTypeOpenVM:
for vk := range l.openVmVks {
vks[vk] = struct{}{}
}
default:
log.Error("invalid prover_type", "value", proverType, "prover name", login.Message.ProverName, "prover_version", login.Message.ProverVersion)
}
}
vks := make(map[string]struct{})
for vk := range l.openVmVks {
vks[vk] = struct{}{}
}
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports a same prover version
return errors.New("incompatible vk. please check your params files or config files")
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports a same prover version
return errors.New("incompatible vk. please check your params files or config files")
}
}
@@ -137,9 +99,15 @@ func (l *LoginLogic) ProverHardForkName(login *types.LoginParameter) (string, er
}
proverVersion := proverVersionSplits[0]
if hardForkNames, ok := l.proverVersionHardForkMap[proverVersion]; ok {
return strings.Join(hardForkNames, ","), nil
var hardForkNames []string
for n, minVersion := range l.proverVersionHardForkMap {
if minVersion == "" || version.CheckScrollRepoVersion(proverVersion, minVersion) {
hardForkNames = append(hardForkNames, n)
}
}
if len(hardForkNames) == 0 {
return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
}
return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
return strings.Join(hardForkNames, ","), nil
}
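
With the per-fork minimum versions, ProverHardForkName now returns every fork whose configured minimum prover version is satisfied by the connecting prover. A rough standalone illustration of that selection, using golang.org/x/mod/semver as a stand-in for version.CheckScrollRepoVersion and hypothetical fork/version values:

package main

import (
	"fmt"
	"sort"
	"strings"

	"golang.org/x/mod/semver"
)

func main() {
	// Hypothetical fork -> minimum prover version map, as built from the
	// verifiers list in the coordinator config.
	minVersionByFork := map[string]string{
		"euclid":  "v4.4.45",
		"galileo": "v4.5.0",
	}
	proverVersion := "v4.4.50" // hypothetical prover-reported version

	var forks []string
	for fork, minVersion := range minVersionByFork {
		// semver.Compare stands in for version.CheckScrollRepoVersion here.
		if minVersion == "" || semver.Compare(proverVersion, minVersion) >= 0 {
			forks = append(forks, fork)
		}
	}
	sort.Strings(forks)
	fmt.Println(strings.Join(forks, ",")) // euclid
}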


@@ -0,0 +1,17 @@
.PHONY: build fmt clean clippy
build:
@cargo build --release -p libzkp-c
@mkdir -p lib
@cp -f ../../../../target/release/libzkp.so lib/
fmt:
@cargo fmt --all -- --check
clean:
@cargo clean --release -p libzkp -p libzkp-c -p l2geth
@rm -f lib/libzkp.so
clippy:
@cargo check --release --all-features
@cargo clippy --release -- -D warnings


@@ -20,7 +20,7 @@ function build_test_bins() {
cd $REPO/coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd $REPO/common/libzkp
cd $REPO/coordinator/internal/logic/libzkp
}
build_test_bins


@@ -0,0 +1,159 @@
package libzkp
/*
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import (
"fmt"
"os"
"strings"
"unsafe"
"scroll-tech/common/types/message"
)
func init() {
C.init_tracing()
}
// Helper function to convert a Go string to a C string; the caller must free it with freeCString
func goToCString(s string) *C.char {
return C.CString(s)
}
// Helper function to free C string
func freeCString(s *C.char) {
C.free(unsafe.Pointer(s))
}
// Initialize the verifier
func InitVerifier(configJSON string) {
cConfig := goToCString(configJSON)
defer freeCString(cConfig)
C.init_verifier(cConfig)
}
// Verify a chunk proof
func VerifyChunkProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(strings.ToLower(forkName))
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_chunk_proof(cProof, cForkName)
return result != 0
}
// Verify a batch proof
func VerifyBatchProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(strings.ToLower(forkName))
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_batch_proof(cProof, cForkName)
return result != 0
}
// Verify a bundle proof
func VerifyBundleProof(proofData, forkName string) bool {
cProof := goToCString(proofData)
cForkName := goToCString(strings.ToLower(forkName))
defer freeCString(cProof)
defer freeCString(cForkName)
result := C.verify_bundle_proof(cProof, cForkName)
return result != 0
}
// TaskType enum values matching the Rust enum
const (
TaskTypeChunk = 0
TaskTypeBatch = 1
TaskTypeBundle = 2
)
func fromMessageTaskType(taskType int) int {
switch message.ProofType(taskType) {
case message.ProofTypeChunk:
return TaskTypeChunk
case message.ProofTypeBatch:
return TaskTypeBatch
case message.ProofTypeBundle:
return TaskTypeBundle
default:
panic(fmt.Sprintf("unsupported proof type: %d", taskType))
}
}
// Generate a universal task
func GenerateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
return generateUniversalTask(fromMessageTaskType(taskType), taskJSON, strings.ToLower(forkName), expectedVk, decryptionKey)
}
// Generate wrapped proof
func GenerateWrappedProof(proofJSON, metadata string, vkData []byte) string {
cProofJSON := goToCString(proofJSON)
cMetadata := goToCString(metadata)
defer freeCString(cProofJSON)
defer freeCString(cMetadata)
// Create a C array from Go slice
var cVkData *C.char
if len(vkData) > 0 {
cVkData = (*C.char)(unsafe.Pointer(&vkData[0]))
}
resultPtr := C.gen_wrapped_proof(cProofJSON, cMetadata, cVkData, C.size_t(len(vkData)))
if resultPtr == nil {
return ""
}
// Convert result to Go string and free C memory
result := C.GoString(resultPtr)
C.release_string(resultPtr)
return result
}
// Dumps a verification key to a file
func DumpVk(forkName, filePath string) error {
cForkName := goToCString(strings.ToLower(forkName))
cFilePath := goToCString(filePath)
defer freeCString(cForkName)
defer freeCString(cFilePath)
// Call the C function to dump the verification key
C.dump_vk(cForkName, cFilePath)
// Check if the file was created successfully
// Note: The C function doesn't return an error code, so we check if the file exists
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return fmt.Errorf("failed to dump verification key: file %s was not created", filePath)
}
return nil
}
// UniversalTaskCompatibilityFix calls the universal task compatibility fix function
func UniversalTaskCompatibilityFix(taskJSON string) (string, error) {
cTaskJSON := goToCString(taskJSON)
defer freeCString(cTaskJSON)
resultPtr := C.univ_task_compatibility_fix(cTaskJSON)
if resultPtr == nil {
return "", fmt.Errorf("univ_task_compatibility_fix failed")
}
// Convert result to Go string and free C memory
result := C.GoString(resultPtr)
C.release_string(resultPtr)
return result, nil
}
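
A rough sketch of how these bindings are typically driven: initialize once with the verifier configuration, then verify proofs per fork. The config path and proof payload are placeholders, and because the package lives under internal/ and links against the native library built by the Makefile above, this only compiles in-tree with cgo enabled.

package main

import (
	"fmt"
	"os"

	"scroll-tech/coordinator/internal/logic/libzkp"
)

func main() {
	// Hypothetical path; in practice the verifier JSON is derived from the
	// coordinator's config file.
	cfgJSON, err := os.ReadFile("conf/verifier.json")
	if err != nil {
		panic(err)
	}
	libzkp.InitVerifier(string(cfgJSON))

	// proofJSON is a placeholder for the serialized proof submitted by a prover.
	proofJSON := `{"placeholder": true}`
	ok := libzkp.VerifyBatchProof(proofJSON, "galileo")
	fmt.Println("batch proof valid:", ok)
}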


@@ -0,0 +1,62 @@
// Verifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
// - Verify a chunk proof
#ifndef LIBZKP_H
#define LIBZKP_H
#include <stddef.h> // For size_t
// Init log tracing
void init_tracing();
// Initialize the verifier with configuration
void init_verifier(char* config);
// Initialize the l2geth with configuration
void init_l2geth(char* config);
// Verify proofs - returns non-zero for success, zero for failure
char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);
char verify_chunk_proof(char* proof, char* fork_name);
// Dump verification key to file
void dump_vk(char* fork_name, char* file);
// The result struct to hold data from handling a proving task
typedef struct {
char ok;
char* universal_task;
char* metadata;
char expected_pi_hash[32];
} HandlingResult;
// Generate a universal task based on task type and input JSON
// Returns a struct containing task data, metadata, and expected proof hash
HandlingResult gen_universal_task(
int task_type,
char* task,
char* fork_name,
const unsigned char* expected_vk,
size_t expected_vk_len,
const unsigned char* decryption_key,
size_t decryption_key_len
);
// Release memory allocated for a HandlingResult returned by gen_universal_task
void release_task_result(HandlingResult result);
// Generate a wrapped proof from the universal prover output and metadata
// Returns a JSON string containing the wrapped proof, or NULL on error
// The caller must call release_string on the returned pointer when done
char* gen_wrapped_proof(char* proof_json, char* metadata, char* vk, size_t vk_len);
// Release memory allocated for a string returned by gen_wrapped_proof
void release_string(char* string_ptr);
// Universal task compatibility fix function
char* univ_task_compatibility_fix(char* task_json);
#endif /* LIBZKP_H */


@@ -0,0 +1,45 @@
//go:build mock_verifier
package libzkp
import (
"encoding/json"
"fmt"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/common"
)
func InitL2geth(configJSON string) {
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
fmt.Printf("call mocked generate universal task %d, taskJson %s\n", taskType, taskJSON)
var metadata interface{}
switch taskType {
case TaskTypeChunk:
metadata = struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}
case TaskTypeBatch:
metadata = struct {
BatchInfo *message.OpenVMBatchInfo `json:"batch_info"`
BatchHash common.Hash `json:"batch_hash"`
}{BatchInfo: &message.OpenVMBatchInfo{}}
case TaskTypeBundle:
metadata = struct {
BundleInfo *message.OpenVMBundleInfo `json:"bundle_info"`
BundlePIHash common.Hash `json:"bundle_pi_hash"`
}{BundleInfo: &message.OpenVMBundleInfo{}}
}
encodeData, err := json.Marshal(metadata)
if err != nil {
fmt.Println("mock encoding json fail:", err)
return false, "", "", nil
}
return true, "UniversalTask data is not parsed", string(encodeData), []byte{0}
}


@@ -0,0 +1,57 @@
//go:build !mock_verifier
package libzkp
/*
#include <stdlib.h>
#include "libzkp.h"
*/
import "C" //nolint:typecheck
import "unsafe"
// Initialize the handler for universal task
func InitL2geth(configJSON string) {
cConfig := goToCString(configJSON)
defer freeCString(cConfig)
C.init_l2geth(cConfig)
}
func generateUniversalTask(taskType int, taskJSON, forkName string, expectedVk []byte, decryptionKey []byte) (bool, string, string, []byte) {
cTask := goToCString(taskJSON)
cForkName := goToCString(forkName)
defer freeCString(cTask)
defer freeCString(cForkName)
// Create a C array from Go slice
var cVk *C.uchar
if len(expectedVk) > 0 {
cVk = (*C.uchar)(unsafe.Pointer(&expectedVk[0]))
}
// Create a C array from Go slice
var cDk *C.uchar
if len(decryptionKey) > 0 {
cDk = (*C.uchar)(unsafe.Pointer(&decryptionKey[0]))
}
result := C.gen_universal_task(C.int(taskType), cTask, cForkName, cVk, C.size_t(len(expectedVk)), cDk, C.size_t(len(decryptionKey)))
defer C.release_task_result(result)
// Check if the operation was successful
if result.ok == 0 {
return false, "", "", nil
}
// Convert C strings to Go strings
universalTask := C.GoString(result.universal_task)
metadata := C.GoString(result.metadata)
// Convert C array to Go slice
piHash := make([]byte, 32)
for i := 0; i < 32; i++ {
piHash[i] = byte(result.expected_pi_hash[i])
}
return true, universalTask, metadata, piHash
}


@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"math/big"
"time"
"github.com/gin-gonic/gin"
@@ -11,6 +12,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -34,12 +36,13 @@ type BatchProverTask struct {
}
// NewBatchProverTask new a batch collector
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *BatchProverTask {
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
db: db,
cfg: cfg,
chainCfg: chainCfg,
expectedVk: expectedVk,
blockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
@@ -81,10 +84,37 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBatchTask *orm.Batch
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBatch) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get batch has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBatchTask == nil {
// if the assigned batch was dropped, assigning another one would cause too many issues
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped batch. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBatchTask, getTaskError = bp.batchOrm.GetBatchByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected batch", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBatchTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpBatchTask == nil {
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
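
The assignment flow above reduces to a three-way precedence when choosing which batch to hand out; a condensed sketch with stubbed types and lookups (not the coordinator's real ORM API):

package main

import (
	"errors"
	"fmt"
)

// Stubbed stand-ins for the coordinator's ORM types (illustration only).
type batch struct{ Hash string }
type proverTask struct{ TaskID string }

// pickBatch mirrors the selection order used in Assign above:
//  1. a batch already assigned to this prover,
//  2. the batch explicitly requested via task_id,
//  3. otherwise the next assignable batch from the queue.
func pickBatch(
	assigned *proverTask,
	requestedID string,
	byHash func(hash string) (*batch, error),
	nextAssignable func() (*batch, error),
) (*batch, error) {
	if assigned != nil {
		b, err := byHash(assigned.TaskID)
		if err != nil || b == nil {
			return nil, errors.New("previously assigned batch is gone")
		}
		return b, nil
	}
	if requestedID != "" {
		b, err := byHash(requestedID)
		if err != nil || b == nil {
			return nil, fmt.Errorf("requested task %s has already been dropped", requestedID)
		}
		return b, nil
	}
	return nextAssignable()
}

func main() {
	byHash := func(h string) (*batch, error) { return &batch{Hash: h}, nil }
	next := func() (*batch, error) { return &batch{Hash: "0xqueue-head"}, nil }

	b, _ := pickBatch(&proverTask{TaskID: "0xabc"}, "", byHash, next)
	fmt.Println(b.Hash) // 0xabc: the prover keeps its previously assigned batch
}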
@@ -112,29 +142,32 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight)
return nil, nil
// we simply reuse the batch that has already been assigned, so there is no need to update attempts or re-check previous failures
if taskCtx.hasAssignedTask == nil {
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
}
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
}
batchTask = tmpBatchTask
@@ -147,31 +180,59 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
var proverTask *orm.ProverTask
if taskCtx.hasAssignedTask == nil {
proverTask = &orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
} else {
proverTask = taskCtx.hasAssignedTask
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, batchTask, hardForkName)
taskMsg, err := bp.formatProverTask(ctx.Copy(), proverTask, batchTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = bp.applyUniversal(taskMsg)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("Generate universal prover task failure", "task_id", batchTask.Hash, "type", "batch", "err", err)
return nil, ErrCoordinatorInternalFailure
}
proverTask.Metadata = metadata
if isCompatibilityFixingVersion(taskCtx.ProverVersion) {
log.Info("Apply compatibility fixing for prover", "version", taskCtx.ProverVersion)
if err := fixCompatibility(taskMsg); err != nil {
log.Error("apply compatibility failure", "err", err)
return nil, ErrCoordinatorInternalFailure
}
}
}
// Store session info.
if taskCtx.hasAssignedTask == nil {
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
}
// notice uuid is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
bp.batchTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.batchTaskGetTaskProver.With(prometheus.Labels{
@@ -195,49 +256,35 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
return nil, fmt.Errorf("no chunk found for batch task id:%s", task.TaskID)
}
var chunkProofs []message.ChunkProof
var chunkInfos []*message.ChunkInfo
var chunkProofs []*message.OpenVMChunkProof
// var chunkInfos []*message.ChunkInfo
for _, chunk := range chunks {
proof := message.NewChunkProof(hardForkName)
var proof message.OpenVMChunkProof
if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
}
chunkProofs = append(chunkProofs, proof)
chunkInfo := message.ChunkInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
PostStateRoot: common.HexToHash(chunk.StateRoot),
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
DataHash: common.HexToHash(chunk.Hash),
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
IsPadding: false,
}
if haloProot, ok := proof.(*message.Halo2ChunkProof); ok {
if haloProot.ChunkInfo != nil {
chunkInfo.TxBytes = haloProot.ChunkInfo.TxBytes
}
}
chunkInfos = append(chunkInfos, &chunkInfo)
chunkProofs = append(chunkProofs, &proof)
}
taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs)
taskDetail, err := bp.getBatchTaskDetail(batch, chunkProofs, hardForkName)
if err != nil {
return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", task.TaskID, err)
}
chunkProofsBytes, err := json.Marshal(taskDetail)
taskBytesWithchunkProofs, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", task.TaskID, err)
}
taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
TaskData: string(taskBytesWithchunkProofs),
HardForkName: hardForkName,
}
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeBatch.String(), "hard_fork_name", hardForkName, "task_data", taskMsg.TaskData)
return taskMsg, nil
}
@@ -247,41 +294,59 @@ func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *or
}
}
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []message.ChunkProof) (*message.BatchTaskDetail, error) {
taskDetail := &message.BatchTaskDetail{
ChunkInfos: chunkInfos,
ChunkProofs: chunkProofs,
}
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch dbBatchCodecVersion {
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7:
default:
return taskDetail, nil
}
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
// Get the version byte.
version, err := bp.version(hardForkName)
if err != nil {
return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
return nil, fmt.Errorf("failed to decode version byte: %w", err)
}
batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
taskDetail := &message.BatchTaskDetail{
Version: version,
ChunkProofs: chunkProofs,
ForkName: hardForkName,
}
taskDetail.BatchHeader = batchHeader
taskDetail.BlobBytes = dbBatch.BlobBytes
if !bp.validiumMode() {
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch dbBatchCodecVersion {
case 0:
log.Warn("the codec version is 0, if it is not under integration test we have encountered an error here")
return taskDetail, nil
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7, encoding.CodecV8, encoding.CodecV9, encoding.CodecV10:
default:
return nil, fmt.Errorf("Unsupported codec version <%d>", dbBatchCodecVersion)
}
if len(dbBatch.BlobDataProof) < 160 {
return nil, fmt.Errorf("blob data proof length is less than 160 bytes = %d, taskID: %s: %s", len(dbBatch.BlobDataProof), dbBatch.Hash, common.Bytes2Hex(dbBatch.BlobDataProof))
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
if err != nil {
return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
}
batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
}
taskDetail.BatchHeader = batchHeader
taskDetail.ChallengeDigest = common.HexToHash(dbBatch.ChallengeDigest)
// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
// | z | y | kzg_commitment | kzg_proof |
// |---------|---------|----------------|-----------|
// | bytes32 | bytes32 | bytes48 | bytes48 |
taskDetail.KzgProof = &message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
taskDetail.KzgCommitment = &message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
} else {
log.Info("Apply validium mode for batch proving task")
codec := cutils.FromVersion(version)
batchHeader, decodeErr := codec.DABatchForTaskFromBytes(dbBatch.BatchHeader)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
}
batchHeader.SetHash(common.HexToHash(dbBatch.Hash))
taskDetail.BatchHeader = batchHeader
}
// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
// | z | y | kzg_commitment | kzg_proof |
// |---------|---------|----------------|-----------|
// | bytes32 | bytes32 | bytes48 | bytes48 |
taskDetail.KzgProof = dbBatch.BlobDataProof[112:160]
taskDetail.KzgCommitment = dbBatch.BlobDataProof[64:112]
taskDetail.Challenge = common.Hash(dbBatch.BlobDataProof[0:32])
return taskDetail, nil
}
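The two memory-layout comments above describe the same 160-byte `BlobDataProof` encoding; the slices `[0:32]`, `[64:112]` and `[112:160]` follow directly from it. A minimal sketch of that slicing, assuming only the documented layout (the helper name `blobDataProofParts` is illustrative and not part of the coordinator):
package main
import (
	"errors"
	"fmt"
)
// blobDataProofParts splits a 160-byte BlobDataProof into its four fixed-size
// fields per the documented layout: z (32) | y (32) | kzg_commitment (48) | kzg_proof (48).
func blobDataProofParts(p []byte) (z, y, commitment, proof []byte, err error) {
	if len(p) < 160 {
		return nil, nil, nil, nil, errors.New("blob data proof shorter than 160 bytes")
	}
	z = p[0:32]            // challenge / evaluation point
	y = p[32:64]           // evaluation value (not used by getBatchTaskDetail above)
	commitment = p[64:112] // KZG commitment
	proof = p[112:160]     // KZG opening proof
	return z, y, commitment, proof, nil
}
func main() {
	buf := make([]byte, 160)
	z, y, c, pr, _ := blobDataProofParts(buf)
	fmt.Println(len(z), len(y), len(c), len(pr)) // 32 32 48 48
}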

View File

@@ -9,6 +9,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -32,12 +33,13 @@ type BundleProverTask struct {
}
// NewBundleProverTask creates a new bundle collector
func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProverTask {
func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *BundleProverTask {
bp := &BundleProverTask{
BaseProverTask: BaseProverTask{
db: db,
chainCfg: chainCfg,
cfg: cfg,
expectedVk: expectedVk,
blockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
@@ -80,10 +82,37 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBundleTask *orm.Bundle
tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeBundle) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get bundle has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBundleTask == nil {
// if the assigned bundle was dropped, it would be too problematic to assign another one
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped bundle. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpBundleTask, getTaskError = bp.bundleOrm.GetBundleByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected bundle", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpBundleTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpBundleTask == nil {
tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}
// Why query again? To support assigning a task to multiple provers, we must also hand out bundles already in `ProvingTaskAssigned`
@@ -111,31 +140,33 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
if getTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight)
return nil, nil
// we simply pick the bundle that has already been assigned, so there is no need to update attempts or re-check earlier failures
if taskCtx.hasAssignedTask == nil {
// Don't dispatch the same failing job to the same prover
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
if getTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBundleTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
}
rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
bundleTask = tmpBundleTask
break
}
@@ -146,31 +177,60 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
}
log.Info("start bundle proof generation session", "task index", bundleTask.Index, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: bundleTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBundle),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// why UTC time is needed here: see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
var proverTask *orm.ProverTask
if taskCtx.hasAssignedTask == nil {
proverTask = &orm.ProverTask{
TaskID: bundleTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBundle),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// why UTC time is needed here: see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
} else {
proverTask = taskCtx.hasAssignedTask
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
taskMsg, err := bp.formatProverTask(ctx.Copy(), proverTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("format bundle prover task failure", "task_id", bundleTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = bp.applyUniversal(taskMsg)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("Generate universal prover task failure", "task_id", bundleTask.Hash, "type", "bundle", "err", err)
return nil, ErrCoordinatorInternalFailure
}
// bundle proofs require a snark
taskMsg.UseSnark = true
proverTask.Metadata = metadata
if isCompatibilityFixingVersion(taskCtx.ProverVersion) {
log.Info("Apply compatibility fixing for prover", "version", taskCtx.ProverVersion)
if err := fixCompatibility(taskMsg); err != nil {
log.Error("apply compatibility failure", "err", err)
return nil, ErrCoordinatorInternalFailure
}
}
}
// Store session info.
if taskCtx.hasAssignedTask == nil {
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
}
// notice uuid is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
bp.bundleTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.bundleTaskGetTaskProver.With(prometheus.Labels{
@@ -194,17 +254,46 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
}
var batchProofs []message.BatchProof
var prevStateRoot common.Hash
// this is common in test cases: the first batch has an empty parent
if batches[0].Index > 1 {
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
}
prevStateRoot = common.HexToHash(parentBatch.StateRoot)
}
var batchProofs []*message.OpenVMBatchProof
for _, batch := range batches {
proof := message.NewBatchProof(hardForkName)
var proof message.OpenVMBatchProof
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, task.TaskID, batch.Hash)
}
batchProofs = append(batchProofs, proof)
batchProofs = append(batchProofs, &proof)
}
// Get the version byte.
version, err := bp.version(hardForkName)
if err != nil {
return nil, fmt.Errorf("failed to decode version byte: %w", err)
}
taskDetail := message.BundleTaskDetail{
Version: version,
BatchProofs: batchProofs,
ForkName: hardForkName,
}
taskDetail.BundleInfo = &message.OpenVMBundleInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: prevStateRoot,
PostStateRoot: common.HexToHash(batches[len(batches)-1].StateRoot),
WithdrawRoot: common.HexToHash(batches[len(batches)-1].WithdrawRoot),
NumBatches: uint32(len(batches)),
PrevBatchHash: common.HexToHash(batches[0].ParentBatchHash),
BatchHash: common.HexToHash(batches[len(batches)-1].Hash),
MsgQueueHash: common.HexToHash(batches[len(batches)-1].PostL1MessageQueueHash),
}
batchProofsBytes, err := json.Marshal(taskDetail)
@@ -213,12 +302,14 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
}
taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBundle),
TaskData: string(batchProofsBytes),
HardForkName: hardForkName,
}
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeBundle.String(), "hard_fork_name", hardForkName, "task_data", taskMsg.TaskData)
return taskMsg, nil
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -32,12 +33,13 @@ type ChunkProverTask struct {
}
// NewChunkProverTask creates a new chunk prover task
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, expectedVk map[string][]byte, reg prometheus.Registerer) *ChunkProverTask {
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
db: db,
cfg: cfg,
chainCfg: chainCfg,
expectedVk: expectedVk,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
@@ -78,12 +80,39 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpChunkTask *orm.Chunk
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
if taskCtx.hasAssignedTask != nil {
if taskCtx.hasAssignedTask.TaskType != int16(message.ProofTypeChunk) {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
log.Debug("retrieved assigned task chunk", "taskID", taskCtx.hasAssignedTask.TaskID, "prover", taskCtx.ProverName)
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), taskCtx.hasAssignedTask.TaskID)
if getTaskError != nil {
log.Error("failed to get chunk has assigned to prover", "taskID", taskCtx.hasAssignedTask.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpChunkTask == nil {
// if the assigned chunk was dropped, it would be too problematic to assign another one
return nil, fmt.Errorf("prover with publicKey %s is already assigned a dropped chunk. ProverName: %s, ProverVersion: %s",
taskCtx.PublicKey, taskCtx.ProverName, taskCtx.ProverVersion)
}
} else if getTaskParameter.TaskID != "" {
tmpChunkTask, getTaskError = cp.chunkOrm.GetChunkByHash(ctx.Copy(), getTaskParameter.TaskID)
if getTaskError != nil {
log.Error("failed to get expected chunk", "taskID", getTaskParameter.TaskID, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
} else if tmpChunkTask == nil {
return nil, fmt.Errorf("Expected task (%s) is already dropped", getTaskParameter.TaskID)
}
}
if tmpChunkTask == nil {
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}
// Why query again? To support assigning a task to multiple provers, chunks in `ProvingTaskAssigned` status must
// also be assigned. But `proving_status in (1, 2)` would not use the postgres index, so the SQL is split.
if tmpChunkTask == nil {
@@ -109,31 +138,33 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight)
return nil, nil
// we simply pick the chunk that has already been assigned, so there is no need to update attempts or re-check earlier failures
if taskCtx.hasAssignedTask == nil {
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpChunkTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
}
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
chunkTask = tmpChunkTask
break
}
@@ -144,31 +175,51 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// why UTC time is needed here: see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
var proverTask *orm.ProverTask
if taskCtx.hasAssignedTask == nil {
proverTask = &orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// why UTC time is needed here: see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
} else {
proverTask = taskCtx.hasAssignedTask
}
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
taskMsg, err := cp.formatProverTask(ctx.Copy(), proverTask, chunkTask, hardForkName)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
if getTaskParameter.Universal {
var metadata []byte
taskMsg, metadata, err = cp.applyUniversal(taskMsg)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("Generate universal prover task failure", "task_id", chunkTask.Hash, "type", "chunk", "err", err)
return nil, ErrCoordinatorInternalFailure
}
proverTask.Metadata = metadata
}
if taskCtx.hasAssignedTask == nil {
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
}
// notice uuid is set as a side effect of InsertProverTask
taskMsg.UUID = proverTask.UUID.String()
cp.chunkTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
@@ -179,29 +230,42 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil
}
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, chunk *orm.Chunk, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
// Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
if dbErr != nil || len(blockHashes) == 0 {
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%w", task.TaskID, dbErr)
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%v", task.TaskID, dbErr)
}
taskDetail := message.ChunkTaskDetail{
BlockHashes: blockHashes,
// Get the version byte.
version, err := cp.version(hardForkName)
if err != nil {
return nil, fmt.Errorf("failed to decode version byte: %w", err)
}
blockHashesBytes, err := json.Marshal(taskDetail)
var taskDetailBytes []byte
taskDetail := message.ChunkTaskDetail{
Version: version,
BlockHashes: blockHashes,
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
ForkName: hardForkName,
}
taskDetailBytes, err = json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", task.TaskID, err)
}
proverTaskSchema := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeChunk),
TaskData: string(blockHashesBytes),
TaskData: string(taskDetailBytes),
HardForkName: hardForkName,
}
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeChunk.String(), "hard_fork_name", hardForkName, "task_data", proverTaskSchema.TaskData)
return proverTaskSchema, nil
}

View File

@@ -1,6 +1,7 @@
package provertask
import (
"encoding/hex"
"errors"
"fmt"
"strings"
@@ -14,10 +15,13 @@ import (
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
"scroll-tech/coordinator/internal/utils"
)
var (
@@ -37,9 +41,10 @@ type ProverTask interface {
// BaseProverTask a base prover task which contain series functions
type BaseProverTask struct {
cfg *config.Config
chainCfg *params.ChainConfig
db *gorm.DB
cfg *config.Config
chainCfg *params.ChainConfig
db *gorm.DB
expectedVk map[string][]byte
batchOrm *orm.Batch
chunkOrm *orm.Chunk
@@ -56,10 +61,22 @@ type proverTaskContext struct {
ProverProviderType uint8
HardForkNames map[string]struct{}
taskType message.ProofType
chunkTask *orm.Chunk
batchTask *orm.Batch
bundleTask *orm.Bundle
taskType message.ProofType
chunkTask *orm.Chunk
batchTask *orm.Batch
bundleTask *orm.Bundle
hasAssignedTask *orm.ProverTask
}
func (b *BaseProverTask) version(hardForkName string) (uint8, error) {
return utils.Version(hardForkName, b.validiumMode())
}
// validiumMode induces different behavior in task generation:
// + skip the point_evaluation part in the batch task
// + encode the batch header with the codec in utils instead of da-codec
func (b *BaseProverTask) validiumMode() bool {
return b.cfg.L2.ValidiumMode
}
// hardForkName gets the chunk/batch/bundle hard fork name
@@ -122,7 +139,7 @@ func (b *BaseProverTask) hardForkSanityCheck(ctx *gin.Context, taskCtx *proverTa
}
if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
return "", errors.New("to be assigned prover task's hard-fork name is not the same as prover")
return "", fmt.Errorf("to be assigned prover task's hard-fork name is not the same as prover, proverName: %s, proverVersion: %s, proverSupportHardForkNames: %s, taskHardForkName: %v", taskCtx.ProverName, taskCtx.ProverVersion, taskCtx.HardForkNames, hardForkName)
}
return hardForkName, nil
}
@@ -174,17 +191,56 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
}
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
assigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check if prover %s is assigned a task, err: %w", publicKey.(string), err)
}
if isAssigned {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
}
ptc.hasAssignedTask = assigned
return &ptc, nil
}
func (b *BaseProverTask) applyUniversal(schema *coordinatorType.GetTaskSchema) (*coordinatorType.GetTaskSchema, []byte, error) {
expectedVk, ok := b.expectedVk[schema.HardForkName]
if !ok {
return nil, nil, fmt.Errorf("no expectedVk found from hardfork %s", schema.HardForkName)
}
var decryptionKey []byte
if b.cfg.L2.ValidiumMode {
var err error
decryptionKey, err = hex.DecodeString(b.cfg.Sequencer.DecryptionKey)
if err != nil {
return nil, nil, fmt.Errorf("sequencer decryption key hex-decoding failed")
}
}
ok, uTaskData, metadata, _ := libzkp.GenerateUniversalTask(schema.TaskType, schema.TaskData, schema.HardForkName, expectedVk, decryptionKey)
if !ok {
return nil, nil, fmt.Errorf("can not generate universal task, see coordinator log for the reason")
}
schema.TaskData = uTaskData
return schema, []byte(metadata), nil
}
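// Editorial note: the metadata returned by applyUniversal is persisted on the
// ProverTask (proverTask.Metadata in the Assign paths above) and is read back
// in HandleZkProof, where GenerateWrappedProof re-wraps the submitted universal
// proof against the expected vk before verification.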
const CompatibilityVersion = "4.5.43"
func isCompatibilityFixingVersion(ver string) bool {
return !version.CheckScrollRepoVersion(ver, CompatibilityVersion)
}
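// Editorial note (assumption): CheckScrollRepoVersion is taken here to report
// whether ver is at least CompatibilityVersion, so isCompatibilityFixingVersion
// is true only for provers older than 4.5.43 (e.g. "4.5.42" -> true,
// "4.5.43" -> false); those provers get fixCompatibility applied to their task data.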
func fixCompatibility(schema *coordinatorType.GetTaskSchema) error {
fixedTask, err := libzkp.UniversalTaskCompatibilityFix(schema.TaskData)
if err != nil {
return err
}
schema.TaskData = fixedTask
return nil
}
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
getTaskCounterInitOnce.Do(func() {
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{

View File

@@ -19,6 +19,8 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
@@ -69,10 +71,18 @@ type ProofReceiverLogic struct {
validateFailureProverTaskStatusNotOk prometheus.Counter
validateFailureProverTaskTimeout prometheus.Counter
validateFailureProverTaskHaveVerifier prometheus.Counter
proverSpeed *prometheus.GaugeVec
provingTime prometheus.Gauge
evmCyclePerGas prometheus.Gauge
ChunkTask provertask.ProverTask
BundleTask provertask.ProverTask
BatchTask provertask.ProverTask
}
// NewSubmitProofReceiverLogic create a proof receiver logic
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic {
return &ProofReceiverLogic{
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
@@ -127,13 +137,25 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.Cha
Name: "coordinator_validate_failure_submit_have_been_verifier",
Help: "Total number of submit proof validate failure proof have been verifier.",
}),
evmCyclePerGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "evm_circuit_cycle_per_gas",
Help: "VM cycles cost for a gas unit cost in evm execution",
}),
provingTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "chunk_proving_time",
Help: "Wall clock time for chunk proving in second",
}),
proverSpeed: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "prover_speed",
Help: "Cycle against running time of prover (in mhz)",
}, []string{"type", "phase"}),
}
}
// HandleZkProof handles a ZkProof submitted from a prover.
// For now only proving/verifying errors will lead to setting the status as skipped.
// db/unmarshal errors will not, because they are errors on the business logic side.
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) error {
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) (rerr error) {
m.proofReceivedTotal.Inc()
pk := ctx.GetString(coordinatorType.PublicKey)
if len(pk) == 0 {
@@ -150,6 +172,18 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
return ErrValidatorFailureProverTaskEmpty
}
defer func() {
if rerr != nil && types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverAssigned {
// trigger a last-chance closing of the current task if some routine missed it
log.Warn("last-chance proof recovery triggered",
"proofID", proofParameter.TaskID,
"err", rerr,
)
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeUndefined, proofParameter)
}
}()
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
@@ -168,22 +202,66 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
if getHardForkErr != nil {
return ErrGetHardForkNameFailed
}
if proofParameter.Universal {
if len(proverTask.Metadata) == 0 {
return errors.New("can not re-wrapping proof: no metadata has been recorded in advance")
}
var expected_vk []byte
switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:
expected_vk = m.verifier.ChunkVk[hardForkName]
case message.ProofTypeBatch:
expected_vk = m.verifier.BatchVk[hardForkName]
case message.ProofTypeBundle:
expected_vk = m.verifier.BundleVk[hardForkName]
}
if len(expected_vk) == 0 {
return errors.New("no vk specified match current hard fork, check your config")
}
proofParameter.Proof = libzkp.GenerateWrappedProof(proofParameter.Proof, string(proverTask.Metadata), expected_vk)
if proofParameter.Proof == "" {
return errors.New("can not re-wrapping proof, see coordinator log for reason")
}
}
switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:
chunkProof := message.NewChunkProof(hardForkName)
chunkProof := &message.OpenVMChunkProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &chunkProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyChunkProof(chunkProof, hardForkName)
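// Editorial note (assumption): Stat durations appear to be in milliseconds
// (ExecutionTimeMills / ProvingTimeMills), so TotalCycle / (timeMills*1000) is
// cycles per microsecond, i.e. MHz, matching the prover_speed gauge. For example,
// 3e9 cycles over 2000 ms gives 3e9/(2000*1000) = 1500 MHz. Similarly,
// evm_circuit_cycle_per_gas below is TotalCycle / TotalGasUsed.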
if stat := chunkProof.VmProof.Stat; stat != nil {
if g, _ := m.proverSpeed.GetMetricWithLabelValues("chunk", "exec"); g != nil && stat.ExecutionTimeMills > 0 {
g.Set(float64(stat.TotalCycle) / float64(stat.ExecutionTimeMills*1000))
}
if g, _ := m.proverSpeed.GetMetricWithLabelValues("chunk", "proving"); g != nil && stat.ProvingTimeMills > 0 {
g.Set(float64(stat.TotalCycle) / float64(stat.ProvingTimeMills*1000))
}
if chunkProof.MetaData.TotalGasUsed > 0 {
cycle_per_gas := float64(stat.TotalCycle) / float64(chunkProof.MetaData.TotalGasUsed)
m.evmCyclePerGas.Set(cycle_per_gas)
}
m.provingTime.Set(float64(stat.ProvingTimeMills) / 1000)
}
case message.ProofTypeBatch:
batchProof := message.NewBatchProof(hardForkName)
batchProof := &message.OpenVMBatchProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyBatchProof(batchProof, hardForkName)
if stat := batchProof.VmProof.Stat; stat != nil {
if g, _ := m.proverSpeed.GetMetricWithLabelValues("batch", "exec"); g != nil && stat.ExecutionTimeMills > 0 {
g.Set(float64(stat.TotalCycle) / float64(stat.ExecutionTimeMills*1000))
}
if g, _ := m.proverSpeed.GetMetricWithLabelValues("batch", "proving"); g != nil && stat.ProvingTimeMills > 0 {
g.Set(float64(stat.TotalCycle) / float64(stat.ProvingTimeMills*1000))
}
}
case message.ProofTypeBundle:
bundleProof := message.NewBundleProof(hardForkName)
bundleProof := &message.OpenVMBundleProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
return unmarshalErr
}
@@ -245,6 +323,20 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
}
}()
// Internally we override the timeout failure:
// if the prover task FailureType is SessionInfoFailureTimeout, the submitted proof has timed out, but we still accept it
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid &&
types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
proverTask.ProvingStatus = int16(types.ProverAssigned)
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
log.Warn("proof submit proof have timeout", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
}
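// Editorial note: this pairs with the closeProofTask change further down, which
// now passes through the task's existing FailureType instead of resetting it to
// Undefined, so a proof accepted after a timeout keeps its timeout failure type
// on record.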
// Ensure this prover is eligible to participate in the prover task.
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid ||
types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofInvalid {
@@ -262,9 +354,6 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
return ErrValidatorFailureProverTaskCannotSubmitTwice
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
if proofParameter.Status != int(coordinatorType.StatusOk) {
// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)
@@ -280,14 +369,6 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
return ErrValidatorFailureProofMsgStatusNotOk
}
// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, need skip it
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
return ErrValidatorFailureProofTimeout
}
// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofParameter); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofParameter.TaskID, "proverPublicKey", pk,
@@ -302,6 +383,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil
}
@@ -318,7 +400,7 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm
log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String())
if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil {
if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureType(proverTask.FailureType), proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err)
return err
}
@@ -379,6 +461,9 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *
if err != nil {
return err
}
// sync status and failure type into proverTask
proverTask.ProvingStatus = int16(status)
proverTask.FailureType = int16(failureType)
if status == types.ProverProofValid && message.ProofType(proofParameter.TaskType) == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil {

View File

@@ -9,32 +9,33 @@ import (
)
// NewVerifier sets up a mock verifier.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
batchVKMap := map[string]struct{}{"mock_vk": {}}
chunkVKMap := map[string]struct{}{"mock_vk": {}}
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
func NewVerifier(cfg *config.VerifierConfig, _ bool) (*Verifier, error) {
return &Verifier{
cfg: cfg,
OpenVMVkMap: map[string]struct{}{"mock_vk": {}},
ChunkVk: map[string][]byte{"euclidV2": []byte("mock_vk")},
BatchVk: map[string][]byte{"euclidV2": []byte("mock_vk")},
BundleVk: map[string][]byte{},
}, nil
}
// VerifyChunkProof returns a mock verification result for a ChunkProof.
func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBatchProof returns a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBundleProof returns a mock verification result for a BundleProof.
func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
return true, nil
}

View File

@@ -7,11 +7,11 @@ import (
// InvalidTestProof invalid proof used in tests
const InvalidTestProof = "this is a invalid proof"
// Verifier represents a rust ffi to a halo2 verifier.
// Verifier represents a rust ffi to a verifier.
type Verifier struct {
cfg *config.VerifierConfig
ChunkVKMap map[string]struct{}
BatchVKMap map[string]struct{}
BundleVkMap map[string]struct{}
OpenVMVkMap map[string]struct{}
ChunkVk map[string][]byte
BatchVk map[string][]byte
BundleVk map[string][]byte
}

View File

@@ -2,59 +2,71 @@
package verifier
/*
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/
import "C" //nolint:typecheck
import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"path"
"unsafe"
"path/filepath"
"strings"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/libzkp"
"scroll-tech/coordinator/internal/utils"
)
// This struct maps to `CircuitConfig` in common/libzkp/impl/src/verifier.rs
// This struct maps to `CircuitConfig` in libzkp/src/verifier.rs
// Defining a brand new struct here eliminates side effects in case fields
// in `*config.CircuitConfig` are changed
type rustCircuitConfig struct {
Version uint `json:"version"`
ForkName string `json:"fork_name"`
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
Features string `json:"features,omitempty"`
}
func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
var validiumMode bool
func newRustCircuitConfig(cfg config.AssetConfig) *rustCircuitConfig {
ver := cfg.Version
if ver == 0 {
var err error
ver, err = utils.Version(cfg.ForkName, validiumMode)
if err != nil {
panic(err)
}
}
return &rustCircuitConfig{
ForkName: cfg.ForkName,
ParamsPath: cfg.ParamsPath,
Version: uint(ver),
AssetsPath: cfg.AssetsPath,
ForkName: cfg.ForkName,
Features: cfg.Features,
}
}
// This struct maps to `VerifierConfig` in common/libzkp/impl/src/verifier.rs
// This struct maps to `VerifierConfig` in coordinator/internal/logic/libzkp/src/verifier.rs
// Defining a brand new struct here eliminates side effects in case fields
// in `*config.VerifierConfig` are changed
type rustVerifierConfig struct {
LowVersionCircuit *rustCircuitConfig `json:"low_version_circuit"`
HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
Circuits []*rustCircuitConfig `json:"circuits"`
}
func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
return &rustVerifierConfig{
LowVersionCircuit: newRustCircuitConfig(cfg.LowVersionCircuit),
HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
out := &rustVerifierConfig{}
for _, cfg := range cfg.Verifiers {
out.Circuits = append(out.Circuits, newRustCircuitConfig(cfg))
}
return out
}
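// Illustrative only: with a single verifier entry, the JSON handed to
// libzkp.InitVerifier below would look roughly like the following (all values
// made up; "features" is omitted when empty due to `omitempty`):
//   {"circuits":[{"version":9,"fork_name":"euclidV2","assets_path":"/assets/euclidV2"}]}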
type rustVkDump struct {
@@ -64,187 +76,103 @@ type rustVkDump struct {
}
// NewVerifier sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode {
chunkVKMap := map[string]struct{}{"mock_vk": {}}
batchVKMap := map[string]struct{}{"mock_vk": {}}
bundleVKMap := map[string]struct{}{"mock_vk": {}}
openVMVkMap := map[string]struct{}{"mock_vk": {}}
return &Verifier{
cfg: cfg,
ChunkVKMap: chunkVKMap,
BatchVKMap: batchVKMap,
BundleVkMap: bundleVKMap,
OpenVMVkMap: openVMVkMap,
}, nil
}
func NewVerifier(cfg *config.VerifierConfig, useValidiumMode bool) (*Verifier, error) {
validiumMode = useValidiumMode
verifierConfig := newRustVerifierConfig(cfg)
configBytes, err := json.Marshal(verifierConfig)
if err != nil {
return nil, err
}
configStr := C.CString(string(configBytes))
defer func() {
C.free(unsafe.Pointer(configStr))
}()
C.init(configStr)
libzkp.InitVerifier(string(configBytes))
v := &Verifier{
cfg: cfg,
ChunkVKMap: make(map[string]struct{}),
BatchVKMap: make(map[string]struct{}),
BundleVkMap: make(map[string]struct{}),
OpenVMVkMap: make(map[string]struct{}),
ChunkVk: make(map[string][]byte),
BatchVk: make(map[string][]byte),
BundleVk: make(map[string][]byte),
}
if err := v.loadLowVersionVKs(cfg); err != nil {
return nil, err
for _, cfg := range cfg.Verifiers {
if err := v.loadOpenVMVks(cfg); err != nil {
return nil, err
}
}
if err := v.loadOpenVMVks(cfg.HighVersionCircuit.ForkName); err != nil {
return nil, err
}
v.loadCurieVersionVKs()
return v, nil
}
// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, batch verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBatchProof verifies a ZkProof by marshaling it and sending it to the Verifier.
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
}
log.Info("Start to verify batch proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
verified := C.verify_batch_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyBatchProof(string(buf), forkName), nil
}
// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyChunkProof verifies a ZkProof by marshaling it and sending it to the Verifier.
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
}
log.Info("Start to verify chunk proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
verified := C.verify_chunk_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyChunkProof(string(buf), forkName), nil
}
// VerifyBundleProof verifies a ZkProof for a bundle of batches by marshaling it and verifying it via the EVM verifier.
func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
return true, nil
}
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
}
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
log.Info("Start to verify bundle proof ...")
verified := C.verify_bundle_proof(proofStr, forkNameStr)
return verified != 0, nil
return libzkp.VerifyBundleProof(string(buf), forkName), nil
}
func (v *Verifier) readVK(filePat string) (string, error) {
f, err := os.Open(filePat)
if err != nil {
return "", err
/*
add vks of incompatible circuit apps here to avoid using them unexpectedly
25/07/15: 0.5.0rc0 is no longer compatible due to a breaking change
*/
const blocked_vks = `
rSJNNBpsxBdKlstbIIU/aYc7bHau98Qb2yjZMc5PmDhmGOolp5kYRbvF/VcWcO5HN5ujGs6S00W8pZcCoNQRLQ==,
2Lo7Cebm6SFtcsYXipkcMxIBmVY7UpoMXik/Msm7t2nyvi9EaNGsSnDnaCurscYEF+IcdjPUtVtY9EcD7IKwWg==,
D6YFHwTLZF/U2zpYJPQ3LwJZRm85yA5Vq2iFBqd3Mk4iwOUpS8sbOp3vg2+NDxhhKphgYpuUlykpdsoRhEt+cw==,
`
// decodeVkString tries to decode s as hex and, if that fails, as base64.
func decodeVkString(s string) ([]byte, error) {
// Try hex decoding first
if b, err := hex.DecodeString(s); err == nil {
return b, nil
}
byt, err := io.ReadAll(f)
// Fallback to base64 decoding
b, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return "", err
return nil, err
}
return base64.StdEncoding.EncodeToString(byt), nil
if len(b) == 0 {
return nil, fmt.Errorf("decode vk string %s fail (empty bytes)", s)
}
return b, nil
}
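// Illustration of the fallback order above (editorial note): "deadbeef" is valid
// hex and decodes on the first branch, while the vk dumps in blocked_vks contain
// '+', '/' and '=' padding, fail hex decoding, and are decoded as standard
// base64 instead; an empty decode result is rejected.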
// load low version vks, current is darwin
func (v *Verifier) loadLowVersionVKs(cfg *config.VerifierConfig) error {
bundleVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_bundle.vkey"))
if err != nil {
return err
func (v *Verifier) loadOpenVMVks(cfg config.AssetConfig) error {
vkFileName := cfg.Vkfile
if vkFileName == "" {
vkFileName = "openVmVk.json"
}
batchVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_batch.vkey"))
if err != nil {
return err
}
chunkVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_chunk.vkey"))
if err != nil {
return err
}
v.BundleVkMap[bundleVK] = struct{}{}
v.BatchVKMap[batchVK] = struct{}{}
v.ChunkVKMap[chunkVK] = struct{}{}
return nil
}
vkFile := path.Join(cfg.AssetsPath, vkFileName)
func (v *Verifier) loadCurieVersionVKs() {
v.BatchVKMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD9jfGkei+f0wNYpkjW7JO12EfU7CjYVBo+PGku3zaQJI64lbn6BwyTBa4RfrPFpV5mP47ix0sXZ+Wt5wklMLRW7OIJb1yfCDm+gkSsp3/Zqrxt4SY4rQ4WtHfynTCQ0KDi78jNuiFvwxO3ub3DkgGVaxMkGxTRP/Vz6E7MCZMUBR5wZFcMzJn+73f0wYjDxfj00krg9O1VrwVxbVV1ycLR6oQLcOgm/l+xwth8io0vDpF9OY21gD5DgJn9GgcYe8KoRVEbEqApLZPdBibpcSMTY9czZI2LnFcqrDDmYvhEwgjhZrsTog2xLXOODoOupZ/is5ekQ9Gi0y871b1mLlCGA="] = struct{}{}
v.ChunkVKMap["AAAAGQAAAATyWEABRbJ6hQQ5/zLX1gTasr7349minA9rSgMS6gDeHwZKqikRiO3md+pXjjxMHnKQtmXYgMXhJSvlmZ+Ws+cheuly2X1RuNQzcZuRImaKPR9LJsVZYsXfJbuqdKX8p0Gj8G83wMJOmTzNVUyUol0w0lTU+CEiTpHOnxBsTF3EWaW3s1u4ycOgWt1c9M6s7WmaBZLYgAWYCunO5CLCLApNGbCASeck/LuSoedEri5u6HccCKU2khG6zl6W07jvYSbDVLJktbjRiHv+/HQix+K14j8boo8Z/unhpwXCsPxkQA=="] = struct{}{}
}
func (v *Verifier) loadOpenVMVks(forkName string) error {
tempFile := path.Join(os.TempDir(), "openVmVk.json")
defer func() {
if err := os.Remove(tempFile); err != nil {
log.Error("failed to remove temp file", "err", err)
}
}()
forkNameCStr := C.CString(forkName)
defer C.free(unsafe.Pointer(forkNameCStr))
tempFileCStr := C.CString(tempFile)
defer C.free(unsafe.Pointer(tempFileCStr))
C.dump_vk(forkNameCStr, tempFileCStr)
f, err := os.Open(tempFile)
f, err := os.Open(filepath.Clean(vkFile))
if err != nil {
return err
}
@@ -257,8 +185,36 @@ func (v *Verifier) loadOpenVMVks(forkName string) error {
if err := json.Unmarshal(byt, &dump); err != nil {
return err
}
if strings.Contains(blocked_vks, dump.Chunk) {
return fmt.Errorf("loaded blocked chunk vk %s", dump.Chunk)
}
if strings.Contains(blocked_vks, dump.Batch) {
return fmt.Errorf("loaded blocked batch vk %s", dump.Batch)
}
if strings.Contains(blocked_vks, dump.Bundle) {
return fmt.Errorf("loaded blocked bundle vk %s", dump.Bundle)
}
v.OpenVMVkMap[dump.Chunk] = struct{}{}
v.OpenVMVkMap[dump.Batch] = struct{}{}
v.OpenVMVkMap[dump.Bundle] = struct{}{}
log.Info("Load vks", "from", cfg.AssetsPath, "chunk", dump.Chunk, "batch", dump.Batch, "bundle", dump.Bundle)
decodedBytes, err := decodeVkString(dump.Chunk)
if err != nil {
return err
}
v.ChunkVk[cfg.ForkName] = decodedBytes
decodedBytes, err = decodeVkString(dump.Batch)
if err != nil {
return err
}
v.BatchVk[cfg.ForkName] = decodedBytes
decodedBytes, err = decodeVkString(dump.Bundle)
if err != nil {
return err
}
v.BundleVk[cfg.ForkName] = decodedBytes
return nil
}

View File

@@ -11,7 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
)
@@ -29,62 +29,54 @@ func TestFFI(t *testing.T) {
as := assert.New(t)
cfg := &config.VerifierConfig{
MockMode: false,
LowVersionCircuit: &config.CircuitConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPathLo,
ForkName: "darwin",
MinProverVersion: "",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPathHi,
ForkName: "darwinV2",
MinProverVersion: "",
},
MinProverVersion: "",
Verifiers: []config.AssetConfig{{
AssetsPath: *assetsPathHi,
ForkName: "euclidV2",
}},
}
v, err := NewVerifier(cfg)
as.NoError(err)
chunkProof1 := readChunkProof(*chunkProofPath1, as)
chunkOk1, err := v.VerifyChunkProof(chunkProof1, "darwinV2")
chunkOk1, err := v.VerifyChunkProof(chunkProof1, "euclidV2")
as.NoError(err)
as.True(chunkOk1)
t.Log("Verified chunk proof 1")
chunkProof2 := readChunkProof(*chunkProofPath2, as)
chunkOk2, err := v.VerifyChunkProof(chunkProof2, "darwinV2")
chunkOk2, err := v.VerifyChunkProof(chunkProof2, "euclidV2")
as.NoError(err)
as.True(chunkOk2)
t.Log("Verified chunk proof 2")
batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof, "darwinV2")
batchOk, err := v.VerifyBatchProof(batchProof, "euclidV2")
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")
}
func readBatchProof(filePat string, as *assert.Assertions) types.BatchProof {
func readBatchProof(filePat string, as *assert.Assertions) *message.OpenVMBatchProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.Halo2BatchProof{}
proof := &message.OpenVMBatchProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof
}
func readChunkProof(filePat string, as *assert.Assertions) types.ChunkProof {
func readChunkProof(filePat string, as *assert.Assertions) *message.OpenVMChunkProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.Halo2ChunkProof{}
proof := &message.OpenVMChunkProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof


@@ -19,20 +19,23 @@ type Batch struct {
db *gorm.DB `gorm:"column:-"`
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
PrevL1MessageQueueHash string `json:"prev_l1_message_queue_hash" gorm:"column:prev_l1_message_queue_hash"`
PostL1MessageQueueHash string `json:"post_l1_message_queue_hash" gorm:"column:post_l1_message_queue_hash"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
ChallengeDigest string `json:"challenge_digest" gorm:"column:challenge_digest"`
// proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`


@@ -37,6 +37,7 @@ type ProverTask struct {
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
Reward decimal.Decimal `json:"reward" gorm:"column:reward;default:0;type:decimal(78)"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
Metadata []byte `json:"metadata" gorm:"column:metadata;default:NULL"`
AssignedAt time.Time `json:"assigned_at" gorm:"assigned_at"`
// metadata
@@ -56,17 +57,17 @@ func (*ProverTask) TableName() string {
}
// IsProverAssigned checks if a prover with the given public key has been assigned a task.
func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (bool, error) {
func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
var task ProverTask
err := db.Where("prover_public_key = ? AND proving_status = ?", publicKey, types.ProverAssigned).First(&task).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return false, nil
return nil, nil
}
return false, err
return nil, err
}
return true, nil
return &task, nil
}
// GetProverTasks get prover tasks
@@ -268,6 +269,24 @@ func (o *ProverTask) UpdateProverTaskProvingStatusAndFailureType(ctx context.Con
return nil
}
// UpdateProverTaskAssignedTime updates the assigned_at time of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskAssignedTime(ctx context.Context, uuid uuid.UUID, t time.Time, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("uuid = ?", uuid)
updates := make(map[string]interface{})
updates["assigned_at"] = t
if err := db.Updates(updates).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskAssignedTime error: %w, uuid:%s, status: %v", err, uuid, t)
}
return nil
}
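A hedged caller-side sketch of how the two changes above compose: `IsProverAssigned` now returns the assigned task (or nil) instead of a bare bool, so a caller can re-stamp it with `UpdateProverTaskAssignedTime`. The helper name and the `task.UUID` field access are assumptions for illustration only.

```go
// refreshAssignment is a hypothetical helper: nil means no active assignment,
// a non-nil task gets its assigned_at timestamp refreshed.
func refreshAssignment(ctx context.Context, o *ProverTask, publicKey string) error {
	task, err := o.IsProverAssigned(ctx, publicKey)
	if err != nil {
		return err
	}
	if task == nil {
		return nil // nothing currently assigned to this prover
	}
	// assumes the ProverTask model exposes its uuid column as task.UUID
	return o.UpdateProverTaskAssignedTime(ctx, task.UUID, time.Now())
}
```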
// UpdateProverTaskFailureType update the prover task failure type
func (o *ProverTask) UpdateProverTaskFailureType(ctx context.Context, uuid uuid.UUID, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db


@@ -22,7 +22,7 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
ProverVersion: "v0.0.1",
Challenge: "abcdef",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeBatch},
ProverTypes: []ProverType{ProverTypeOpenVM},
VKs: []string{"vk1", "vk2"},
},
PublicKey: publicKeyHex,
@@ -64,7 +64,7 @@ func TestGenerateSignature(t *testing.T) {
ProverVersion: "v4.4.45-37af5ef5-38a68e2-1c5093c",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeChunk},
ProverTypes: []ProverType{ProverTypeOpenVM},
VKs: []string{"mock_vk"},
},
PublicKey: publicKeyHex,


@@ -4,6 +4,8 @@ package types
type GetTaskParameter struct {
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskTypes []int `form:"task_types" json:"task_types"`
TaskID string `form:"task_id,omitempty" json:"task_id,omitempty"`
Universal bool `form:"universal,omitempty" json:"universal,omitempty"`
}
// GetTaskSchema the schema data return to prover for get prover task
@@ -11,6 +13,7 @@ type GetTaskSchema struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
UseSnark bool `json:"use_snark,omitempty"`
TaskData string `json:"task_data"`
HardForkName string `json:"hard_fork_name"`
}
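A minimal sketch of a request using the new fields, mirroring the `universal: true` bodies the mock prover sends later in this diff; the function name is made up and the snippet assumes the usual `encoding/json` and `message` imports are available in this package.

```go
// exampleGetTaskBody is illustrative only: builds a universal get_task request.
func exampleGetTaskBody() ([]byte, error) {
	req := GetTaskParameter{
		ProverHeight: 100,
		TaskTypes:    []int{int(message.ProofTypeChunk), int(message.ProofTypeBatch)},
		Universal:    true, // ask the coordinator for tasks in universal (OpenVM) mode
	}
	// with the omitempty tags, task_id is dropped when empty and
	// universal is only emitted when true
	return json.Marshal(req)
}
```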


@@ -21,10 +21,10 @@ type ProverType uint8
func (r ProverType) String() string {
switch r {
case ProverTypeChunk:
return "prover type chunk"
case ProverTypeBatch:
return "prover type batch"
case ProverTypeChunkDeprecated:
return "prover type chunk (deprecated)"
case ProverTypeBatchDeprecated:
return "prover type batch (deprecated)"
case ProverTypeOpenVM:
return "prover type openvm"
default:
@@ -35,10 +35,10 @@ func (r ProverType) String() string {
const (
// ProverTypeUndefined is an unknown prover type
ProverTypeUndefined ProverType = iota
// ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks
ProverTypeChunk
// ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks
ProverTypeBatch
// ProverTypeChunkDeprecated signals a chunk prover that proves chunk_tasks (deprecated)
ProverTypeChunkDeprecated
// ProverTypeBatchDeprecated signals a batch prover that proves batch_tasks and bundle_tasks (deprecated)
ProverTypeBatchDeprecated
// ProverTypeOpenVM signals an OpenVM prover
ProverTypeOpenVM
)
@@ -47,9 +47,9 @@ const (
func MakeProverType(proofType message.ProofType) ProverType {
switch proofType {
case message.ProofTypeChunk:
return ProverTypeChunk
return ProverTypeChunkDeprecated
case message.ProofTypeBatch, message.ProofTypeBundle:
return ProverTypeBatch
return ProverTypeBatchDeprecated
default:
return ProverTypeUndefined
}
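A tiny sketch (same package assumed) of how the renamed constants interact: legacy proof-type registration still maps onto the deprecated prover types via `MakeProverType`, whereas new provers advertise `ProverTypeOpenVM` directly, as the updated auth tests do.

```go
// exampleProverTypes is illustrative only.
func exampleProverTypes() []ProverType {
	legacyChunk := MakeProverType(message.ProofTypeChunk) // ProverTypeChunkDeprecated
	legacyBatch := MakeProverType(message.ProofTypeBatch) // ProverTypeBatchDeprecated
	return []ProverType{legacyChunk, legacyBatch, ProverTypeOpenVM}
}
```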


@@ -7,6 +7,7 @@ type SubmitProofParameter struct {
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
Proof string `form:"proof" json:"proof"`
Universal bool `form:"universal,omitempty" json:"universal,omitempty"`
FailureType int `form:"failure_type" json:"failure_type"`
FailureMsg string `form:"failure_msg" json:"failure_msg"`
}


@@ -0,0 +1,91 @@
package utils
import (
"encoding/binary"
"fmt"
"github.com/scroll-tech/go-ethereum/common"
)
type CodecVersion uint8
const (
daBatchValidiumEncodedLength = 137
)
type DABatch interface {
SetHash(common.Hash)
}
type daBatchValidiumV1 struct {
Version CodecVersion `json:"version"`
BatchIndex uint64 `json:"batch_index"`
ParentBatchHash common.Hash `json:"parent_batch_hash"`
PostStateRoot common.Hash `json:"post_state_root"`
WithDrawRoot common.Hash `json:"withdraw_root"`
Commitment common.Hash `json:"commitment"`
}
type daBatchValidium struct {
V1 *daBatchValidiumV1 `json:"V1,omitempty"`
BatchHash common.Hash `json:"batch_hash"`
}
func (da *daBatchValidium) SetHash(h common.Hash) {
da.BatchHash = h
}
func FromVersion(v uint8) CodecVersion {
return CodecVersion(v & STFVersionMask)
}
func (c CodecVersion) DABatchForTaskFromBytes(b []byte) (DABatch, error) {
switch c {
case 1:
if v1, err := decodeDABatchV1(b); err == nil {
return &daBatchValidium{
V1: v1,
}, nil
} else {
return nil, err
}
default:
return nil, fmt.Errorf("unknown codec type %d", c)
}
}
func decodeDABatchV1(data []byte) (*daBatchValidiumV1, error) {
if len(data) != daBatchValidiumEncodedLength {
return nil, fmt.Errorf("invalid data length for DABatchV7, expected %d bytes but got %d", daBatchValidiumEncodedLength, len(data))
}
const (
versionSize = 1
indexSize = 8
hashSize = 32
)
// Offsets (same as encodeBatchHeaderValidium)
versionOffset := 0
indexOffset := versionOffset + versionSize
parentHashOffset := indexOffset + indexSize
stateRootOffset := parentHashOffset + hashSize
withdrawRootOffset := stateRootOffset + hashSize
commitmentOffset := withdrawRootOffset + hashSize
version := CodecVersion(data[versionOffset])
batchIndex := binary.BigEndian.Uint64(data[indexOffset : indexOffset+indexSize])
parentBatchHash := common.BytesToHash(data[parentHashOffset : parentHashOffset+hashSize])
postStateRoot := common.BytesToHash(data[stateRootOffset : stateRootOffset+hashSize])
withdrawRoot := common.BytesToHash(data[withdrawRootOffset : withdrawRootOffset+hashSize])
commitment := common.BytesToHash(data[commitmentOffset : commitmentOffset+hashSize])
return &daBatchValidiumV1{
Version: version,
BatchIndex: batchIndex,
ParentBatchHash: parentBatchHash,
PostStateRoot: postStateRoot,
WithDrawRoot: withdrawRoot,
Commitment: commitment,
}, nil
}
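A small round-trip sketch for the decoder above, using placeholder values; it lives in the same `utils` package, so `DomainOffset` from the companion file below is available alongside `FromVersion`.

```go
// exampleDecodeValidiumHeader builds a well-formed 137-byte header and decodes it
// (hash values are placeholders).
func exampleDecodeValidiumHeader() (DABatch, error) {
	buf := make([]byte, daBatchValidiumEncodedLength) // 1 + 8 + 4*32 = 137
	buf[0] = (1 << DomainOffset) | 1                  // validium domain, STF version 1
	binary.BigEndian.PutUint64(buf[1:9], 42)          // batch index
	copy(buf[9:41], common.HexToHash("0x01").Bytes()) // parent batch hash
	// post state root, withdraw root and commitment are left as zero hashes
	codec := FromVersion(buf[0]) // masks off the domain bits -> CodecVersion(1)
	return codec.DABatchForTaskFromBytes(buf)
}
```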


@@ -0,0 +1,42 @@
package utils
import (
"errors"
"strings"
)
const (
DomainOffset = 6
STFVersionMask = (1 << DomainOffset) - 1
)
// Version returns the encoded version byte for the chain instance
//
// TODO: This is not foolproof and does not cover all scenarios.
func Version(hardForkName string, ValidiumMode bool) (uint8, error) {
var domain, stfVersion uint8
if ValidiumMode {
domain = 1
stfVersion = 1
} else {
domain = 0
switch canonicalName := strings.ToLower(hardForkName); canonicalName {
case "euclidv1":
stfVersion = 6
case "euclidv2":
stfVersion = 7
case "feynman":
stfVersion = 8
case "galileo":
stfVersion = 9
case "galileov2":
stfVersion = 10
default:
return 0, errors.New("unknown fork name " + canonicalName)
}
}
return (domain << DomainOffset) + stfVersion, nil
}
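A worked example of the packing, as a sketch in the same package: rollup-mode forks keep the domain bits at zero, while validium mode sets domain 1 and STF version 1 regardless of the fork name.

```go
// exampleVersions shows the resulting version bytes for both modes.
func exampleVersions() (rollupV, validiumV uint8, err error) {
	rollupV, err = Version("feynman", false) // 0<<6 + 8 = 8
	if err != nil {
		return 0, 0, err
	}
	validiumV, err = Version("anything", true) // 1<<6 + 1 = 65; fork name ignored in validium mode
	if err != nil {
		return 0, 0, err
	}
	// FromVersion strips the domain bits again: FromVersion(65) == CodecVersion(1)
	return rollupV, validiumV, nil
}
```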


@@ -67,7 +67,7 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, forks []string) (*cron.Collector, *http.Server) {
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
var err error
db, err = testApps.GetGormDBClient()
@@ -79,24 +79,17 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
tokenTimeout = 60
conf = &config.Config{
L2: &config.L2{
ChainID: 111,
ChainID: 111,
Endpoint: &config.L2Endpoint{},
},
ProverManager: &config.ProverManager{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{
MockMode: true,
LowVersionCircuit: &config.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "homestead",
MinProverVersion: "v4.2.0",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "bernoulli",
MinProverVersion: "v4.3.0",
},
MinProverVersion: "v4.4.89",
Verifiers: []config.AssetConfig{{
AssetsPath: "",
ForkName: "euclidV2",
}},
},
BatchCollectionTimeSec: 10,
ChunkCollectionTimeSec: 10,
@@ -109,20 +102,17 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
},
}
var chainConf params.ChainConfig
for _, forkName := range forks {
switch forkName {
case "bernoulli":
chainConf.BernoulliBlock = big.NewInt(100)
case "homestead":
chainConf.HomesteadBlock = big.NewInt(0)
}
}
proofCollector := cron.NewCollector(context.Background(), db, conf, nil)
router := gin.New()
api.InitController(conf, &chainConf, db, nil)
api.InitController(conf, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, nil)
route.Route(router, conf, nil)
srv := &http.Server{
Addr: coordinatorURL,
@@ -142,7 +132,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
func setEnv(t *testing.T) {
var err error
version.Version = "v4.2.0"
version.Version = "v4.5.45"
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
@@ -198,7 +188,7 @@ func TestApis(t *testing.T) {
func testHandshake(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -211,7 +201,7 @@ func testHandshake(t *testing.T) {
func testFailedHandshake(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
}()
@@ -229,7 +219,7 @@ func testFailedHandshake(t *testing.T) {
func testGetTaskBlocked(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -273,7 +263,7 @@ func testGetTaskBlocked(t *testing.T) {
func testOutdatedProverVersion(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -285,14 +275,12 @@ func testOutdatedProverVersion(t *testing.T) {
batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999")
assert.True(t, chunkProver.healthCheckSuccess(t))
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, chunkProver.proverVersion)
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
assert.Equal(t, types.ErrJWTCommonErr, code)
assert.Equal(t, expectedErr, errors.New(errMsg))
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, batchProver.proverVersion)
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
assert.Equal(t, types.ErrJWTCommonErr, code)
assert.Equal(t, expectedErr, errors.New(errMsg))
@@ -300,7 +288,7 @@ func testOutdatedProverVersion(t *testing.T) {
func testValidProof(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -383,7 +371,7 @@ func testValidProof(t *testing.T) {
func testInvalidProof(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -471,7 +459,7 @@ func testInvalidProof(t *testing.T) {
func testProofGeneratedFailed(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -572,7 +560,7 @@ func testProofGeneratedFailed(t *testing.T) {
func testTimeoutProof(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -595,7 +583,10 @@ func testTimeoutProof(t *testing.T) {
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, batch.Hash)
assert.NoError(t, err)
encodeData, err := json.Marshal(message.Halo2ChunkProof{})
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
TotalGasUsed uint64 `json:"chunk_total_gas"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbChunk.Hash, encodeData, types.ProvingTaskUnassigned, 1)


@@ -161,7 +161,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_types": []int{int(proofType)}}).
SetBody(map[string]interface{}{"universal": true, "prover_height": 100, "task_types": []int{int(proofType)}}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -191,7 +191,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "universal": true}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -207,32 +207,33 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
}
var proof []byte
switch proverTaskSchema.TaskType {
case int(message.ProofTypeChunk):
encodeData, err := json.Marshal(message.Halo2ChunkProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
encodeData, err := json.Marshal(message.Halo2BatchProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
}
if proofStatus == verifiedFailed {
switch proverTaskSchema.TaskType {
case int(message.ProofTypeChunk):
chunkProof := message.Halo2ChunkProof{}
chunkProof.RawProof = []byte(verifier.InvalidTestProof)
encodeData, err := json.Marshal(&chunkProof)
if proofStatus != verifiedFailed {
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
fallthrough
case message.ProofTypeBatch:
encodeData, err := json.Marshal(&message.OpenVMProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
batchProof := message.Halo2BatchProof{}
batchProof.RawProof = []byte(verifier.InvalidTestProof)
encodeData, err := json.Marshal(&batchProof)
case message.ProofTypeBundle:
encodeData, err := json.Marshal(&message.OpenVMEvmProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
}
} else {
// in "verifiedFailed" status, we purpose the mockprover submit proof but not valid
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
fallthrough
case message.ProofTypeBatch:
encodeData, err := json.Marshal(&message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case message.ProofTypeBundle:
encodeData, err := json.Marshal(&message.OpenVMEvmProof{Proof: []byte(verifier.InvalidTestProof)})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
@@ -240,11 +241,12 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
}
submitProof := types.SubmitProofParameter{
UUID: proverTaskSchema.UUID,
TaskID: proverTaskSchema.TaskID,
TaskType: proverTaskSchema.TaskType,
Status: int(proofMsgStatus),
Proof: string(proof),
UUID: proverTaskSchema.UUID,
TaskID: proverTaskSchema.TaskID,
TaskType: proverTaskSchema.TaskType,
Status: int(proofMsgStatus),
Proof: string(proof),
Universal: true,
}
token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))})

crates/l2geth/Cargo.toml (new file, 27 lines)

@@ -0,0 +1,27 @@
[package]
name = "l2geth"
version.workspace = true
edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = { version = "1", features = ["rt-multi-thread"] }
async-trait = "0.1"
url = ">=2.5.3"
libzkp = { path = "../libzkp" }
alloy = { workspace = true, features = ["provider-http", "transport-http", "reqwest", "reqwest-rustls-tls", "json-rpc"] }
sbv-primitives = { workspace = true, features = ["scroll"] }
sbv-utils = { workspace = true, features = ["scroll"] }
sbv-core = { workspace = true, features = ["scroll"] }
eyre.workspace = true
base64.workspace = true
serde.workspace = true
serde_derive.workspace = true
serde_json = { workspace = true, features = ["raw_value"] }
tracing.workspace = true

Some files were not shown because too many files have changed in this diff.