Compare commits

...

56 Commits

Author SHA1 Message Date
Mengran Lan
dbaf8531b1 upgrade snark-verifier-sdk 2024-05-30 22:35:56 +08:00
Mengran Lan
0ca302a654 upgrade to rc4 2024-05-30 22:09:11 +08:00
Mengran Lan
124d10820a enable gzip when sending http request 2024-05-30 14:44:18 +08:00
Mengran Lan
614a894aa1 add sleep logic when failed to fetch task from coordinator 2024-05-28 15:19:54 +08:00
Mengran Lan
c4f54da7ca upgrade zk-circuits to v0.11.0rc2 2024-05-28 10:31:58 +08:00
Mengran Lan
46f5849ae0 comment types for next 2024-05-27 17:24:31 +08:00
Mengran Lan
45d8f66864 update cargo depends 2024-05-27 17:19:02 +08:00
Mengran Lan
94e1ea3a08 change prover_next version to fit the e2e test 2024-05-27 16:53:44 +08:00
Mengran Lan
62c1f00d3b copy libzktrie.so to lib dir 2024-05-27 15:54:49 +08:00
Mengran Lan
038d7a5bbf tmp commit, test next handler wrapper logic (set next handler as default) 2024-05-26 23:13:11 +08:00
Mengran Lan
112e9ac42b add task_cache logic 2024-05-24 13:07:10 +08:00
Mengran Lan
728266ebad add info logs for circuits handler 2024-05-23 11:31:09 +08:00
Mengran Lan
7b8f30d230 add second zkevm-handler && add proof_check when proving batch 2024-05-22 18:47:06 +08:00
Mengran Lan
69ca648c83 utilize proof_status logic 2024-05-22 15:50:06 +08:00
Mengran Lan
00a07a8258 build using --release && fix bug in proof status 2024-05-22 11:41:28 +08:00
Mengran Lan
f87e5b5ca7 fix bug, action not taken if re-login to coordinator 2024-05-21 23:48:29 +08:00
Mengran Lan
7b848f971b fmt code 2024-05-21 12:08:31 +08:00
Mengran Lan
49166ec8d0 change l2geth config to option 2024-05-21 12:06:55 +08:00
Mengran Lan
2d0c36eb5a geth client add tokio runtime 2024-05-20 22:47:46 +08:00
Mengran Lan
445a8d592a unify coordinator client api, add logs 2024-05-20 19:03:18 +08:00
Mengran Lan
eadc51d33b set vk in get task request 2024-05-20 16:18:00 +08:00
Mengran Lan
254a7faf58 init the log; add tokio runtime 2024-05-20 16:15:06 +08:00
Mengran Lan
173cbc4dc4 first compile-ready version 2024-05-16 11:17:16 +08:00
Mengran Lan
94bd5917ba finish most logic, leaving some rust-style compiler issue to be solved 2024-05-15 14:28:01 +08:00
Mengran Lan
107aa5792b tmp save 2024-05-13 15:59:06 +08:00
lugosi
a1a7f25921 build: update golangci-lint (#1321)
Co-authored-by: kongfanfu <kongfanfu@bytedance.com>
2024-05-06 09:47:41 +08:00
JayLiu
8be70f0c80 fix start testcontainers fail bug (#1313) 2024-05-02 21:12:20 +08:00
JayLiu
d1bec53e50 test: replace l1GethContainer with poSL1Container (#1312)
Co-authored-by: TKTech660 <liujay48@gmail.com>
2024-04-30 16:25:22 +08:00
SamiAlHassan
0723b463c5 build: upgrade go-ethereum from scroll-v5.1.6 to scroll-v5.3.0 (#1304)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-30 16:25:12 +08:00
Xin.Zh
46b1ff3284 modify testcontainer (#1302)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-30 14:44:26 +08:00
alwayshang
a3635dba52 docs(coordinator): fix function name (#1297)
Signed-off-by: alwayshang <zhanghonghao@outlook.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-30 11:32:48 +08:00
colin
34ad8ca772 fix(batch-proposer): potential panic risk (#1311) 2024-04-30 08:46:13 +08:00
yanziseeker
8a2a2eb292 docs: clean repetitive words (#1309) 2024-04-28 16:42:48 +08:00
Andi
8ca89374a0 chore: remove repetitive words (#1289)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-28 14:10:21 +08:00
Xi Lin
8128526116 feat(contracts): batch token bridge (#1282) 2024-04-28 12:11:15 +08:00
Péter Garamvölgyi
9262e9af69 Reapply "build(ci): make all intermediate images compatible with multi platforms (#1291)"
This reverts commit 5090b77655.
2024-04-28 11:25:10 +08:00
Péter Garamvölgyi
5090b77655 Revert "build(ci): make all intermediate images compatible with multi platforms (#1291)"
This reverts commit 8f8f6eb1a1.
2024-04-28 10:54:17 +08:00
colin
4cafc9349a feat(gas-oracle): tweak gas price update logic (#1305) 2024-04-28 10:47:04 +08:00
colin
ca6f856372 docs: remove local testing image Dockerfile and docs for mac M1/M2 silicon (#1300) 2024-04-25 08:32:57 +08:00
Mengran Lan
72ee087f35 fix(db): change prover_block_list's index on public_key from non-unique to unique (#1294)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2024-04-24 11:00:09 +08:00
sbaizet
8f8f6eb1a1 build(ci): make all intermediate images compatible with multi platforms (#1291) 2024-04-23 17:20:03 +08:00
Péter Garamvölgyi
c56bda9f47 feat(gas-oracle): relay blob base fee after Bernoulli block (#1293) 2024-04-23 16:53:39 +08:00
Péter Garamvölgyi
433d5c2f52 feat: publish rollup-db-cli image (#1280)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-22 20:19:15 +08:00
georgehao
69c0f7ed75 feat: upgrade gomonkey version (#1290) 2024-04-22 16:03:41 +08:00
colin
c25b827666 docs: add bridge-history deployment in readme (#1281)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-22 14:38:39 +08:00
georgehao
da4f6818e3 feat: upgrade scroll to go1.21 (#1285)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2024-04-22 14:35:50 +08:00
georgehao
200ca7c15b fix(coordinator): fix coordinator cron exit issue (#1286)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-22 09:26:53 +08:00
Mengran Lan
53cf26597d perf(db): add indexes for table prover_task, columns respectively: task_id, created_at (#1283) 2024-04-19 16:54:13 +08:00
sbaizet
f0f7341271 ci - Docker support for arm64 on all services (#1278) 2024-04-16 10:06:29 +02:00
sbaizet
b6af88c936 ci - Docker support for arm64 (#1276) 2024-04-15 16:18:25 +02:00
sbaizet
de541a650a ci - fix github action to support arm64 platform (#1275) 2024-04-15 15:35:18 +02:00
colin
d7a57235d3 fix(rollup-relayer): tweak logs (#1274)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-15 14:07:26 +08:00
sbaizet
91d21301ec ci: add support for arm64 on event-watcher images (#1269) 2024-04-14 16:27:59 +08:00
Daniel Helm
4b32a44a70 docs(coordinator): fix internal links to config files in README (#1272) 2024-04-14 16:27:17 +08:00
georgehao
55b400c5fb fix: rollup make lint failure (#1273) 2024-04-14 16:26:35 +08:00
JayLiu
1b49091207 test: fix testcontainers listen ports (#1270)
Co-authored-by: liuyuecai <liuyuecai1995@gmail.com>
2024-04-13 13:30:22 +08:00
119 changed files with 9545 additions and 472 deletions

View File

@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go

View File

@@ -33,13 +33,13 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
@@ -54,7 +54,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
@@ -95,7 +95,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc

View File

@@ -46,6 +46,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -90,6 +91,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -134,6 +136,52 @@ jobs:
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup-db-cli:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: rollup-db-cli
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: rollup-db-cli
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/db_cli.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -178,6 +226,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -222,6 +271,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -266,6 +316,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -310,6 +361,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-cron.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}

View File

@@ -7,12 +7,12 @@ on:
description: 'Go version'
required: true
type: string
default: '1.20'
default: '1.21'
RUST_VERSION:
description: 'Rust toolchain version'
required: true
type: string
default: 'nightly-2022-12-10'
default: 'nightly-2023-12-03'
PYTHON_VERSION:
description: 'Python version'
required: false
@@ -29,31 +29,191 @@ defaults:
working-directory: 'build/dockerfiles/intermediate'
jobs:
build-and-push:
build-and-publish-cuda-go-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build
run: |
make all
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Publish
run: |
make publish
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/cuda-go-rust-builder.Dockerfile
tags: scrolltech/cuda-go-rust-builder:cuda-${{ github.event.inputs.CUDA_VERSION }}-go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-alpine-builder:${{ github.event.inputs.GO_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-alpine-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-alpine-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-py-runner:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/py-runner.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/py-runner:${{ github.event.inputs.PYTHON_VERSION }}
build-args: |
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}

View File

@@ -50,7 +50,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go

.gitignore vendored
View File

@@ -20,3 +20,5 @@ coverage.txt
# misc
sftp-config.json
*~
target

View File

@@ -1,6 +1,6 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update
L2GETH_TAG=scroll-v5.1.6
L2GETH_TAG=scroll-v5.3.0
help: ## Display this help message
@grep -h \
@@ -47,11 +47,5 @@ dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/
build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
docker build -t scroll_test_image -f ./build/dockerfiles/local_testing.Dockerfile $$(mktemp -d)
run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app -e HOST_PATH=$(PWD) scroll_test_image
clean: ## Empty out the bin folder
@rm -rf build/bin

View File

@@ -43,8 +43,6 @@ make dev_docker
## Testing Rollup & Coordinator
### For Non-Apple Silicon (M1/M2) Macs
Run the tests using the following commands:
```bash
@@ -54,39 +52,6 @@ go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
### For Apple Silicon (M1/M2) Macs
To run tests on Apple Silicon Macs, build and execute the Docker image as outlined below:
#### Build a Docker Image for Testing
Use the following command to build a Docker image:
```bash
make build_test_docker
```
This command builds a Docker image named `scroll_test_image` using the Dockerfile found at `./build/dockerfiles/local_test.Dockerfile`.
#### Run Docker Image
After the image is built, run a Docker container from it:
```bash
make run_test_docker
```
This command runs a Docker container named `scroll_test_container` from the `scroll_test_image` image. The container uses the host network and has access to the Docker socket and the current directory.
Once the Docker container is running, execute the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
## Testing Contracts
You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).

View File

@@ -79,3 +79,50 @@ provides REST APIs. Please refer to the API details below.
// @Success 200
// @Router /api/txsbyhashes [post]
```
## Running bridge-history-api locally
1. Pull the latest Redis image:
```
docker pull redis:latest
```
2. Run the Redis container:
```
docker run --name bridgehistoryapi-redis -d -p 6379:6379 redis:latest
```
3. Pull the latest PostgreSQL image:
```
docker pull postgres:latest
```
4. Run the PostgreSQL container:
```
docker run --name bridgehistoryapi-history-db -p 5444:5432 -e POSTGRES_PASSWORD=123456 -e POSTGRES_DB=test -d postgres
```
5. Run database migrations to initialize the tables:
```
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli migrate
```
6. Run bridgehistoryapi-fetcher:
```
make bridgehistoryapi-fetcher
./build/bin/bridgehistoryapi-fetcher
```
7. Run bridgehistoryapi-api:
```
make bridgehistoryapi-api
./build/bin/bridgehistoryapi-api
```
The endpoints provided in [./conf/config.json](./conf/config.json) are all public endpoints and have rate limits.
For production usage:
- For L1 endpoints, utilizing a service provider's free tier should suffice.
- For L2 endpoints, consider [running a Scroll L2geth node](https://docs.scroll.xyz/en/developers/guides/running-a-scroll-node) and using the exposed HTTP port.
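With both binaries running, the API can be smoke-tested with any HTTP client. Below is a minimal Go sketch against the `/api/txsbyhashes` route shown above; the listen port (8080) and the request body field name (`txs`) are assumptions for illustration only — check `./conf/config.json` and the route's handler for the actual values.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical body shape: the route name suggests a list of tx hashes,
	// but the exact field name is an assumption for this sketch.
	body, err := json.Marshal(map[string][]string{"txs": {"0xabc..."}})
	if err != nil {
		panic(err)
	}
	// Port 8080 is assumed; the real port comes from ./conf/config.json.
	resp, err := http.Post("http://localhost:8080/api/txsbyhashes",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```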

View File

@@ -15,6 +15,7 @@
"USDCGatewayAddr": "0xf1AF3b23DE0A5Ca3CAb7261cb0061C0D779A5c7B",
"LIDOGatewayAddr": "0x6625C6332c9F91F2D27c304E729B86db87A3f504",
"DAIGatewayAddr": "0x67260A8B73C5B77B55c1805218A42A7A6F98F515",
"PufferGatewayAddr": "0xA033Ff09f2da45f0e9ae495f525363722Df42b2a",
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"
@@ -34,7 +35,9 @@
"USDCGatewayAddr": "0x33B60d5Dd260d453cAC3782b0bDC01ce84672142",
"LIDOGatewayAddr": "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9",
"DAIGatewayAddr": "0xaC78dff3A87b5b534e366A93E785a0ce8fA6Cc62",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79"
"PufferGatewayAddr": "0x9eBf2f33526CD571f8b2ad312492cb650870CFd6",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79",
"MessageQueueAddr": "0x5300000000000000000000000000000000000000"
},
"db": {
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",

View File

@@ -104,10 +104,12 @@ linters-settings:
# minimal occurrences count to trigger, 3 by default
min-occurrences: 3
depguard:
list-type: blacklist
include-go-root: false
packages:
- github.com/davecgh/go-spew/spew
rules:
main:
files:
- $all
deny:
- pkg: "github.com/davecgh/go-spew/spew"
misspell:
# Correct spellings using locale preferences for US or UK.
# Default is to use a neutral variety of English.
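Under the new rules-based depguard config above, the denied package applies to all files (`$all`). As a hedged illustration, a file like the following would now fail `make lint`:

```go
package example

// Importing the denied package trips depguard's "main" rule,
// so golangci-lint flags this file.
import "github.com/davecgh/go-spew/spew"

// Dump exists only to use the import; spew.Dump pretty-prints any value.
func Dump(v interface{}) {
	spew.Dump(v)
}
```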

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY ./bridge-history-api/go.* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as chef
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
WORKDIR app
FROM chef as planner
@@ -17,7 +17,7 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,8 +1,8 @@
ifeq ($(GO_VERSION),)
GO_VERSION=1.20
GO_VERSION=1.21
endif
ifeq ($(RUST_VERSION),)
RUST_VERSION=nightly-2022-12-10
RUST_VERSION=nightly-2023-12-03
endif
ifeq ($(PYTHON_VERSION),)
PYTHON_VERSION=3.10

View File

@@ -1,6 +1,6 @@
ARG CUDA_VERSION=11.7.1
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04

View File

@@ -1,4 +1,4 @@
ARG GO_VERSION=1.20
ARG GO_VERSION=1.21
FROM golang:${GO_VERSION}-alpine

View File

@@ -1,5 +1,5 @@
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM golang:${GO_VERSION}-alpine

View File

@@ -1,5 +1,5 @@
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM ubuntu:20.04

View File

@@ -1,5 +1,5 @@
ARG ALPINE_VERSION=3.15
ARG RUST_VERSION=nightly-2022-12-10
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM alpine:${ALPINE_VERSION}

View File

@@ -1,4 +1,4 @@
ARG RUST_VERSION=nightly-2022-12-10
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM ubuntu:20.04

View File

@@ -1,11 +0,0 @@
# Start from the latest golang base image
FROM golang:1.21
# Install Docker
RUN apt-get update && apt-get install -y docker.io docker-compose
# Set the working directory
WORKDIR /go/src/app
# This container will be executable
ENTRYPOINT [ "/bin/bash" ]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -15,7 +15,7 @@ import (
const (
// GolangCIVersion to be used for linting.
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2"
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.57.2"
)
// GOBIN environment variable.

View File

@@ -1,127 +0,0 @@
package dockercompose
import (
"context"
"crypto/rand"
"fmt"
"math/big"
"os"
"path/filepath"
"time"
"github.com/cloudflare/cfssl/log"
"github.com/scroll-tech/go-ethereum/ethclient"
tc "github.com/testcontainers/testcontainers-go/modules/compose"
"github.com/testcontainers/testcontainers-go/wait"
)
// PoSL1TestEnv represents the config needed to test in PoS Layer 1.
type PoSL1TestEnv struct {
dockerComposeFile string
compose tc.ComposeStack
gethHTTPPort int
hostPath string
}
// NewPoSL1TestEnv creates and initializes a new instance of PoSL1TestEnv with a random HTTP port.
func NewPoSL1TestEnv() (*PoSL1TestEnv, error) {
rootDir, err := findProjectRootDir()
if err != nil {
return nil, fmt.Errorf("failed to find project root directory: %v", err)
}
hostPath, found := os.LookupEnv("HOST_PATH")
if !found {
hostPath = ""
}
rnd, err := rand.Int(rand.Reader, big.NewInt(65536-1024))
if err != nil {
return nil, fmt.Errorf("failed to generate a random: %v", err)
}
gethHTTPPort := int(rnd.Int64()) + 1024
if err := os.Setenv("GETH_HTTP_PORT", fmt.Sprintf("%d", gethHTTPPort)); err != nil {
return nil, fmt.Errorf("failed to set GETH_HTTP_PORT: %v", err)
}
return &PoSL1TestEnv{
dockerComposeFile: filepath.Join(rootDir, "common", "docker-compose", "l1", "docker-compose.yml"),
gethHTTPPort: gethHTTPPort,
hostPath: hostPath,
}, nil
}
// Start starts the PoS L1 test environment by running the associated Docker Compose configuration.
func (e *PoSL1TestEnv) Start() error {
var err error
e.compose, err = tc.NewDockerCompose([]string{e.dockerComposeFile}...)
if err != nil {
return fmt.Errorf("failed to create docker compose: %w", err)
}
env := map[string]string{
"GETH_HTTP_PORT": fmt.Sprintf("%d", e.gethHTTPPort),
}
if e.hostPath != "" {
env["HOST_PATH"] = e.hostPath
}
if err = e.compose.WaitForService("geth", wait.NewHTTPStrategy("/").WithPort("8545/tcp").WithStartupTimeout(15*time.Second)).WithEnv(env).Up(context.Background()); err != nil {
if errStop := e.Stop(); errStop != nil {
log.Error("failed to stop PoS L1 test environment", "err", errStop)
}
return fmt.Errorf("failed to start PoS L1 test environment: %w", err)
}
return nil
}
// Stop stops the PoS L1 test environment by stopping and removing the associated Docker Compose services.
func (e *PoSL1TestEnv) Stop() error {
if e.compose != nil {
if err := e.compose.Down(context.Background(), tc.RemoveOrphans(true), tc.RemoveVolumes(true), tc.RemoveImagesLocal); err != nil {
return fmt.Errorf("failed to stop PoS L1 test environment: %w", err)
}
}
return nil
}
// Endpoint returns the HTTP endpoint for the PoS L1 test environment.
func (e *PoSL1TestEnv) Endpoint() string {
return fmt.Sprintf("http://127.0.0.1:%d", e.gethHTTPPort)
}
// L1Client returns an ethclient by dialing the running PoS L1 test environment
func (e *PoSL1TestEnv) L1Client() (*ethclient.Client, error) {
if e == nil {
return nil, fmt.Errorf("PoS L1 test environment is not initialized")
}
client, err := ethclient.Dial(e.Endpoint())
if err != nil {
return nil, fmt.Errorf("failed to dial PoS L1 test environment: %w", err)
}
return client, nil
}
func findProjectRootDir() (string, error) {
currentDir, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("failed to get working directory: %w", err)
}
for {
_, err := os.Stat(filepath.Join(currentDir, "go.work"))
if err == nil {
return currentDir, nil
}
parentDir := filepath.Dir(currentDir)
if parentDir == currentDir {
return "", fmt.Errorf("go.work file not found in any parent directory")
}
currentDir = parentDir
}
}

View File

@@ -5,7 +5,6 @@ go 1.21
require (
github.com/Masterminds/semver/v3 v3.2.1
github.com/bits-and-blooms/bitset v1.12.0
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004
github.com/docker/docker v25.0.3+incompatible
github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/gin v1.9.1

View File

@@ -8,8 +8,8 @@ services:
mkdir -p /data/execution &&
cp -a /execution/* /data/execution/"
volumes:
- ${HOST_PATH:-../../..}/common/docker-compose/l1/consensus:/consensus
- ${HOST_PATH:-../../..}/common/docker-compose/l1/execution:/execution
- ../../common/testcontainers/consensus:/consensus
- ../../common/testcontainers/execution:/execution
- data:/data
# Creates a genesis state for the beacon chain using a YAML configuration file and
@@ -96,7 +96,7 @@ services:
- --nodiscover
- --syncmode=full
ports:
- ${GETH_HTTP_PORT:-8545}:8545
- 8545
depends_on:
geth-genesis:
condition: service_completed_successfully

View File

@@ -4,10 +4,13 @@ import (
"context"
"fmt"
"log"
"os"
"path/filepath"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/compose"
"github.com/testcontainers/testcontainers-go/modules/postgres"
"github.com/testcontainers/testcontainers-go/wait"
"gorm.io/gorm"
@@ -18,8 +21,8 @@ import (
// TestcontainerApps testcontainers struct
type TestcontainerApps struct {
postgresContainer *postgres.PostgresContainer
l1GethContainer *testcontainers.DockerContainer
l2GethContainer *testcontainers.DockerContainer
poSL1Container compose.ComposeStack
// common time stamp in nanoseconds.
Timestamp int
@@ -28,6 +31,11 @@ type TestcontainerApps struct {
// NewTestcontainerApps returns new instance of TestcontainerApps struct
func NewTestcontainerApps() *TestcontainerApps {
timestamp := time.Now().Nanosecond()
// In order to solve the problem of "creating reaper failed: failed to create container"
// refer to https://github.com/testcontainers/testcontainers-go/issues/2172
if err := os.Setenv("TESTCONTAINERS_RYUK_DISABLED", "true"); err != nil {
panic("set env failed: " + err.Error())
}
return &TestcontainerApps{
Timestamp: timestamp,
}
@@ -53,30 +61,6 @@ func (t *TestcontainerApps) StartPostgresContainer() error {
return nil
}
// StartL1GethContainer starts a L1Geth container
func (t *TestcontainerApps) StartL1GethContainer() error {
if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
return nil
}
req := testcontainers.ContainerRequest{
Image: "scroll_l1geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
Cmd: []string{"--log.debug", "ANY"},
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
}
container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
if err != nil {
log.Printf("failed to start scroll_l1geth container: %s", err)
return err
}
t.l1GethContainer, _ = container.(*testcontainers.DockerContainer)
return nil
}
// StartL2GethContainer starts a L2Geth container
func (t *TestcontainerApps) StartL2GethContainer() error {
if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
@@ -85,7 +69,10 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
req := testcontainers.ContainerRequest{
Image: "scroll_l2geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
WaitingFor: wait.ForAll(
wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
),
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
@@ -100,6 +87,55 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
return nil
}
// StartPoSL1Container starts the PoS L1 container by running the associated Docker Compose configuration
func (t *TestcontainerApps) StartPoSL1Container() error {
var (
err error
rootDir string
dockerComposeFile string
)
if rootDir, err = findProjectRootDir(); err != nil {
return fmt.Errorf("failed to find project root directory: %v", err)
}
dockerComposeFile = filepath.Join(rootDir, "common", "testcontainers", "docker-compose.yml")
if t.poSL1Container, err = compose.NewDockerCompose([]string{dockerComposeFile}...); err != nil {
return err
}
err = t.poSL1Container.WaitForService("geth", wait.NewHTTPStrategy("/").
WithPort("8545/tcp").
WithStartupTimeout(15*time.Second)).
Up(context.Background())
if err != nil {
t.poSL1Container = nil
return fmt.Errorf("failed to start PoS L1 container: %w", err)
}
return nil
}
// GetPoSL1EndPoint returns the endpoint of the running PoS L1 endpoint
func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
if t.poSL1Container == nil {
return "", fmt.Errorf("PoS L1 container is not running")
}
contrainer, err := t.poSL1Container.ServiceContainer(context.Background(), "geth")
if err != nil {
return "", err
}
return contrainer.PortEndpoint(context.Background(), "8545/tcp", "http")
}
// GetPoSL1Client returns a ethclient by dialing running PoS L1 client
func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
endpoint, err := t.GetPoSL1EndPoint()
if err != nil {
return nil, err
}
return ethclient.Dial(endpoint)
}
// GetDBEndPoint returns the endpoint of the running postgres container
func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
if t.postgresContainer == nil || !t.postgresContainer.IsRunning() {
@@ -108,18 +144,6 @@ func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
return t.postgresContainer.ConnectionString(context.Background(), "sslmode=disable")
}
// GetL1GethEndPoint returns the endpoint of the running L1Geth container
func (t *TestcontainerApps) GetL1GethEndPoint() (string, error) {
if t.l1GethContainer == nil || !t.l1GethContainer.IsRunning() {
return "", fmt.Errorf("l1 geth is not running")
}
endpoint, err := t.l1GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
if err != nil {
return "", err
}
return endpoint, nil
}
// GetL2GethEndPoint returns the endpoint of the running L2Geth container
func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
if t.l2GethContainer == nil || !t.l2GethContainer.IsRunning() {
@@ -147,19 +171,6 @@ func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
return database.InitDB(dbCfg)
}
// GetL1GethClient returns a ethclient by dialing running L1Geth
func (t *TestcontainerApps) GetL1GethClient() (*ethclient.Client, error) {
endpoint, err := t.GetL1GethEndPoint()
if err != nil {
return nil, err
}
client, err := ethclient.Dial(endpoint)
if err != nil {
return nil, err
}
return client, nil
}
// GetL2GethClient returns a ethclient by dialing running L2Geth
func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
endpoint, err := t.GetL2GethEndPoint()
@@ -181,14 +192,38 @@ func (t *TestcontainerApps) Free() {
log.Printf("failed to stop postgres container: %s", err)
}
}
if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
if err := t.l1GethContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop scroll_l1geth container: %s", err)
}
}
if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
if err := t.l2GethContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop scroll_l2geth container: %s", err)
}
}
if t.poSL1Container != nil {
if err := t.poSL1Container.Down(context.Background(), compose.RemoveOrphans(true), compose.RemoveVolumes(true), compose.RemoveImagesLocal); err != nil {
log.Printf("failed to stop PoS L1 container: %s", err)
} else {
t.poSL1Container = nil
}
}
}
// findProjectRootDir find project root directory
func findProjectRootDir() (string, error) {
currentDir, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("failed to get working directory: %w", err)
}
for {
_, err := os.Stat(filepath.Join(currentDir, "go.work"))
if err == nil {
return currentDir, nil
}
parentDir := filepath.Dir(currentDir)
if parentDir == currentDir {
return "", fmt.Errorf("go.work file not found in any parent directory")
}
currentDir = parentDir
}
}

View File

@@ -17,8 +17,9 @@ func TestNewTestcontainerApps(t *testing.T) {
ethclient *ethclient.Client
)
// test start testcontainers
testApps := NewTestcontainerApps()
// test start testcontainers
assert.NoError(t, testApps.StartPostgresContainer())
endpoint, err = testApps.GetDBEndPoint()
assert.NoError(t, err)
@@ -27,14 +28,6 @@ func TestNewTestcontainerApps(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, gormDBclient)
assert.NoError(t, testApps.StartL1GethContainer())
endpoint, err = testApps.GetL1GethEndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetL1GethClient()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NoError(t, testApps.StartL2GethContainer())
endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)
@@ -43,17 +36,25 @@ func TestNewTestcontainerApps(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NoError(t, testApps.StartPoSL1Container())
endpoint, err = testApps.GetPoSL1EndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetPoSL1Client()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
// test free testcontainers
testApps.Free()
endpoint, err = testApps.GetDBEndPoint()
assert.EqualError(t, err, "postgres is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetL1GethEndPoint()
assert.EqualError(t, err, "l1 geth is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetL2GethEndPoint()
assert.EqualError(t, err, "l2 geth is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetPoSL1EndPoint()
assert.EqualError(t, err, "PoS L1 container is not running")
assert.Empty(t, endpoint)
}

View File

@@ -9,7 +9,7 @@ import (
// CheckScrollProverVersion check the "scroll-prover" version, if it's different from the local one, return false
func CheckScrollProverVersion(proverVersion string) bool {
// note the the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// note the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
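For context, the check this comment documents is a plain split on `-`. A minimal sketch (the helper name below is hypothetical and covers only the length check; the real `CheckScrollProverVersion` goes on to compare the parts against the local version):

```go
package version

import "strings"

// looksLikeProverVersion is a hypothetical helper mirroring the comment
// above: "tag-commit-scroll_prover-halo2" must split into exactly 4 parts.
func looksLikeProverVersion(proverVersion string) bool {
	return len(strings.Split(proverVersion, "-")) == 4
}
```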

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.3.92"
var tag = "v4.4.3"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
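The hunk cuts off inside the `commit` closure. A common way this pattern finishes — an assumed completion for illustration, not the repository's exact body — is to read `vcs.revision` from the embedded build info:

```go
package version

import "runtime/debug"

// Assumed completion of the truncated closure: take the short VCS revision
// from the build info, falling back to a placeholder when it is missing.
var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, s := range info.Settings {
			if s.Key == "vcs.revision" && len(s.Value) >= 8 {
				return s.Value[:8]
			}
		}
	}
	return "unknown"
}()
```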

View File

@@ -67,7 +67,7 @@ Commit a batch of transactions on layer 1.
function committedBatches(uint256) external view returns (bytes32)
```
Return the batch hash of a committed batch.
@@ -81,7 +81,7 @@ Return the batch hash of a committed batch.
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
| _0 | bytes32 | The batch hash of a committed batch. |
### finalizeBatchWithProof
@@ -130,7 +130,7 @@ Finalize a committed batch (with blob) on layer 1.
function finalizedStateRoots(uint256) external view returns (bytes32)
```
Return the state root of a committed batch.
@@ -144,7 +144,7 @@ Return the state root of a committed batch.
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
| _0 | bytes32 | The state root of a committed batch. |
### importGenesisBatch
@@ -160,8 +160,8 @@ Import layer 2 genesis block
| Name | Type | Description |
|---|---|---|
| _batchHeader | bytes | undefined |
| _stateRoot | bytes32 | undefined |
| _batchHeader | bytes | The header of the genesis batch. |
| _stateRoot | bytes32 | The state root of the genesis block. |
### initialize
@@ -187,7 +187,7 @@ Initialize the storage of ScrollChain.
function isBatchFinalized(uint256 _batchIndex) external view returns (bool)
```
Return whether the batch is finalized by batch index.
@@ -201,7 +201,7 @@ Return whether the batch is finalized by batch index.
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
| _0 | bool | Whether the batch is finalized by batch index. |
### isProver
@@ -253,7 +253,7 @@ Whether an account is a sequencer.
function lastFinalizedBatchIndex() external view returns (uint256)
```
The latest finalized batch index.
@@ -262,7 +262,7 @@ The latest finalized batch index.
| Name | Type | Description |
|---|---|---|
| _0 | uint256 | undefined |
| _0 | uint256 | The latest finalized batch index. |
### layer2ChainId
@@ -480,7 +480,7 @@ The address of RollupVerifier.
function withdrawRoots(uint256) external view returns (bytes32)
```
Return the message root of a committed batch.
@@ -494,7 +494,7 @@ Return the message root of a committed batch.
| Name | Type | Description |
|---|---|---|
| _0 | bytes32 | undefined |
| _0 | bytes32 | The message root of a committed batch. |
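The documented functions above are all view methods, so they can be read with a bare `eth_call`. A hedged Go sketch (the RPC URL is a placeholder; the address is the `ScrollChainAddr` from the config diff earlier in this comparison):

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	ethereum "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("https://example-l1-rpc") // placeholder URL
	if err != nil {
		panic(err)
	}
	chain := common.HexToAddress("0xa13BAF47339d63B743e7Da8741db5456DAc1E556")
	// eth_call with the 4-byte selector of lastFinalizedBatchIndex().
	data := crypto.Keccak256([]byte("lastFinalizedBatchIndex()"))[:4]
	out, err := client.CallContract(context.Background(),
		ethereum.CallMsg{To: &chain, Data: data}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("last finalized batch index:", new(big.Int).SetBytes(out))
}
```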

View File

@@ -11,7 +11,7 @@ import {IScrollERC20Upgradeable} from "../../libraries/token/IScrollERC20Upgrade
/// @title L2ERC20Gateway
/// @notice The `L2ERC20Gateway` is used to withdraw custom ERC20 compatible tokens on layer 2 and
/// finalize deposit the tokens from layer 1.
/// @dev The withdrawn tokens tokens will be burned directly. On finalizing deposit, the corresponding
/// @dev The withdrawn tokens will be burned directly. On finalizing deposit, the corresponding
/// tokens will be minted and transferred to the recipient.
contract L2CustomERC20Gateway is L2ERC20Gateway {
/**********

View File

@@ -0,0 +1,35 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
library BatchBridgeCodec {
/// @dev Encode the `token` and `batchIndex` to single `bytes32`.
function encodeInitialNode(address token, uint64 batchIndex) internal pure returns (bytes32 node) {
assembly {
node := add(shl(96, token), batchIndex)
}
}
/// @dev Encode the `sender` and `amount` to single `bytes32`.
function encodeNode(address sender, uint96 amount) internal pure returns (bytes32 node) {
assembly {
node := add(shl(96, sender), amount)
}
}
/// @dev Decode `bytes32` `node` to `receiver` and `amount`.
function decodeNode(bytes32 node) internal pure returns (address receiver, uint256 amount) {
receiver = address(uint160(uint256(node) >> 96));
amount = uint256(node) & 0xffffffffffffffffffffffff;
}
/// @dev Compute `keccak256(concat(a, b))`.
function hash(bytes32 a, bytes32 b) internal pure returns (bytes32 value) {
// solhint-disable-next-line no-inline-assembly
assembly {
mstore(0x00, a)
mstore(0x20, b)
value := keccak256(0x00, 0x40)
}
}
}
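Off-chain tooling (a keeper or indexer) has to reproduce this packing bit-for-bit. A hedged Go mirror of the three helpers, assuming values fit in 96 bits as the Solidity types require:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// pack mirrors encodeInitialNode/encodeNode: address in the top 160 bits,
// a uint96 value (batch index or amount) in the low 96 bits.
func pack(addr common.Address, low96 *big.Int) common.Hash {
	n := new(big.Int).Lsh(new(big.Int).SetBytes(addr.Bytes()), 96)
	return common.BigToHash(n.Add(n, low96))
}

// unpack mirrors decodeNode.
func unpack(node common.Hash) (common.Address, *big.Int) {
	n := new(big.Int).SetBytes(node.Bytes())
	receiver := common.BigToAddress(new(big.Int).Rsh(n, 96))
	mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 96), big.NewInt(1))
	return receiver, new(big.Int).And(n, mask)
}

func main() {
	sender := common.HexToAddress("0x000000000000000000000000000000000000dEaD")
	node := pack(sender, big.NewInt(42))
	recv, amt := unpack(node)
	fmt.Println(recv == sender, amt) // true 42
	// hash(a, b) is plain keccak256(a || b):
	fmt.Println(crypto.Keccak256Hash(node.Bytes(), node.Bytes()).Hex())
}
```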

View File

@@ -0,0 +1,425 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {AccessControlEnumerableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/AccessControlEnumerableUpgradeable.sol";
import {ReentrancyGuardUpgradeable} from "@openzeppelin/contracts-upgradeable/security/ReentrancyGuardUpgradeable.sol";
import {SafeERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
import {AddressUpgradeable} from "@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol";
import {IL1ERC20Gateway} from "../L1/gateways/IL1ERC20Gateway.sol";
import {IL1GatewayRouter} from "../L1/gateways/IL1GatewayRouter.sol";
import {IL1MessageQueue} from "../L1/rollup/IL1MessageQueue.sol";
import {IL1ScrollMessenger} from "../L1/IL1ScrollMessenger.sol";
import {BatchBridgeCodec} from "./BatchBridgeCodec.sol";
import {L2BatchBridgeGateway} from "./L2BatchBridgeGateway.sol";
/// @title L1BatchBridgeGateway
contract L1BatchBridgeGateway is AccessControlEnumerableUpgradeable, ReentrancyGuardUpgradeable {
using SafeERC20Upgradeable for IERC20Upgradeable;
/**********
* Events *
**********/
/// @notice Emitted when some user deposited token to this contract.
/// @param sender The address of token sender.
/// @param token The address of deposited token.
/// @param batchIndex The batch index of current deposit.
/// @param amount The amount of token deposited (including fee).
/// @param fee The amount of fee charged.
event Deposit(
address indexed sender,
address indexed token,
uint256 indexed batchIndex,
uint256 amount,
uint256 fee
);
/// @notice Emitted when a batch deposit is initiated.
/// @param caller The address of caller who initiate the deposit.
/// @param l1Token The address of the token in L1 to deposit.
/// @param batchIndex The index of current batch deposit.
/// @param l2Token The address of the corresponding token in L2.
event BatchDeposit(address indexed caller, address indexed l1Token, uint256 indexed batchIndex, address l2Token);
/**********
* Errors *
**********/
/// @dev Thrown when caller is not `messenger`.
error ErrorCallerNotMessenger();
/// @dev Thrown when the deposited amount is smaller than `minAmountPerTx`.
error ErrorDepositAmountTooSmall();
/// @dev Thrown when users try to deposit ETH with `depositERC20` method.
error ErrorIncorrectMethodForETHDeposit();
/// @dev Thrown when the `msg.value` is not enough for batch deposit fee.
error ErrorInsufficientMsgValueForBatchDepositFee();
/// @dev Thrown when the given new batch config is invalid.
error ErrorInvalidBatchConfig();
/// @dev Thrown when no pending batch exists.
error ErrorNoPendingBatch();
/// @dev Thrown when user deposits unsupported tokens.
error ErrorTokenNotSupported();
/// @dev Thrown when ETH transfer failed.
error ErrorTransferETHFailed();
/*************
* Constants *
*************/
/// @notice The role for batch deposit keeper.
bytes32 public constant KEEPER_ROLE = keccak256("KEEPER_ROLE");
/// @notice The safe gas limit for batch bridge.
uint256 private constant SAFE_BATCH_BRIDGE_GAS_LIMIT = 200000;
/// @notice The address of corresponding `L2BatchDepositGateway` contract.
address public immutable counterpart;
/// @notice The address of `L1GatewayRouter` contract.
address public immutable router;
/// @notice The address of `L1ScrollMessenger` contract.
address public immutable messenger;
/// @notice The address of `L1MessageQueue` contract.
address public immutable queue;
/***********
* Structs *
***********/
/// @notice The config for batch token bridge.
/// @dev Compiler will pack this into a single `bytes32`.
/// @param feeAmountPerTx The amount of fee charged for each deposit.
/// @param minAmountPerTx The minimum amount of token for each deposit.
/// @param maxTxsPerBatch The maximum number of deposit in each batch.
/// @param maxDelayPerBatch The maximum number of seconds to wait in each batch.
/// @param safeBridgeGasLimit The safe bridge gas limit for bridging token from L1 to L2.
struct BatchConfig {
uint96 feeAmountPerTx;
uint96 minAmountPerTx;
uint16 maxTxsPerBatch;
uint24 maxDelayPerBatch;
uint24 safeBridgeGasLimit;
}
/// @dev Compiler will pack this into two `bytes32`.
/// @param amount The total amount of token to deposit in current batch.
/// @param startTime The timestamp of the first deposit.
/// @param numDeposits The total number of deposits in current batch.
/// @param hash The hash of current batch.
/// Suppose there are `n` deposits in current batch with `senders` and `amounts`. The hash is computed as
/// ```text
/// hash[0] = concat(token, batch_index)
/// hash[i] = keccak(hash[i-1], concat(senders[i], amounts[i]))
/// ```
/// The type of `token` and `senders` is `address`, while The type of `batch_index` and `amounts[i]` is `uint96`.
/// In current way, the hash of each batch among all tokens should be different.
struct BatchState {
uint128 amount;
uint64 startTime;
uint64 numDeposits;
bytes32 hash;
}
/// @dev Compiler will pack this into a single `bytes32`.
/// @param pending The total amount of token pending to bridge.
/// @param currentBatchIndex The index of current batch.
/// @param pendingBatchIndex The index of pending batch (next batch to bridge).
struct TokenState {
uint128 pending;
uint64 currentBatchIndex;
uint64 pendingBatchIndex;
}
/*************
* Variables *
*************/
/// @notice Mapping from token address to batch bridge config.
/// @dev The `address(0)` is used for ETH.
mapping(address => BatchConfig) public configs;
/// @notice Mapping from token address to batch index to batch state.
/// @dev The `address(0)` is used for ETH.
mapping(address => mapping(uint256 => BatchState)) public batches;
/// @notice Mapping from token address to token state.
/// @dev The `address(0)` is used for ETH.
mapping(address => TokenState) public tokens;
/// @notice The address of fee vault.
address public feeVault;
/***************
* Constructor *
***************/
/// @param _counterpart The address of `L2BatchDepositGateway` contract in L2.
/// @param _router The address of `L1GatewayRouter` contract in L1.
/// @param _messenger The address of `L1ScrollMessenger` contract in L1.
/// @param _queue The address of `L1MessageQueue` contract in L1.
constructor(
address _counterpart,
address _router,
address _messenger,
address _queue
) {
_disableInitializers();
counterpart = _counterpart;
router = _router;
messenger = _messenger;
queue = _queue;
}
/// @notice Initialize the storage of `L1BatchDepositGateway`.
/// @param _feeVault The address of fee vault contract.
function initialize(address _feeVault) external initializer {
__Context_init(); // from ContextUpgradeable
__ERC165_init(); // from ERC165Upgradeable
__AccessControl_init(); // from AccessControlUpgradeable
__AccessControlEnumerable_init(); // from AccessControlEnumerableUpgradeable
__ReentrancyGuard_init(); // from ReentrancyGuardUpgradeable
feeVault = _feeVault;
_grantRole(DEFAULT_ADMIN_ROLE, _msgSender());
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Receive refunded ETH from `L1ScrollMessenger`.
receive() external payable {
if (_msgSender() != messenger) {
revert ErrorCallerNotMessenger();
}
}
/// @notice Deposit ETH.
function depositETH() external payable {
// no safe cast check here, since no one has so much ETH yet.
_deposit(address(0), _msgSender(), uint96(msg.value));
}
/// @notice Deposit ERC20 token.
///
/// @param token The address of token.
/// @param amount The amount of token to deposit. We use type `uint96`, since it is enough for most of the major tokens.
function depositERC20(address token, uint96 amount) external {
if (token == address(0)) revert ErrorIncorrectMethodForETHDeposit();
// common practice to handle fee on transfer token.
uint256 beforeBalance = IERC20Upgradeable(token).balanceOf(address(this));
IERC20Upgradeable(token).safeTransferFrom(_msgSender(), address(this), amount);
amount = uint96(IERC20Upgradeable(token).balanceOf(address(this)) - beforeBalance);
_deposit(token, _msgSender(), amount);
}
/************************
* Restricted Functions *
************************/
/// @notice Add or update the batch bridge config for the given token.
///
/// @dev The caller should make sure `safeBridgeGasLimit` is enough for batch bridging.
///
/// @param token The address of token to update.
/// @param newConfig The new config.
function setBatchConfig(address token, BatchConfig memory newConfig) external onlyRole(DEFAULT_ADMIN_ROLE) {
if (
newConfig.maxTxsPerBatch == 0 ||
newConfig.maxDelayPerBatch == 0 ||
newConfig.feeAmountPerTx > newConfig.minAmountPerTx
) {
revert ErrorInvalidBatchConfig();
}
configs[token] = newConfig;
}
/// @notice Initiate the batch bridge of current pending batch.
/// @param token The address of the token.
function executeBatchDeposit(address token) external payable onlyRole(KEEPER_ROLE) {
BatchConfig memory cachedBatchConfig = configs[token];
TokenState memory cachedTokenState = tokens[token];
_tryFinalizeCurrentBatch(token, cachedBatchConfig, cachedTokenState);
// no batch to bridge
if (cachedTokenState.currentBatchIndex == cachedTokenState.pendingBatchIndex) {
revert ErrorNoPendingBatch();
}
// check bridge fee
uint256 depositFee = IL1MessageQueue(queue).estimateCrossDomainMessageFee(cachedBatchConfig.safeBridgeGasLimit);
uint256 batchBridgeFee = IL1MessageQueue(queue).estimateCrossDomainMessageFee(SAFE_BATCH_BRIDGE_GAS_LIMIT);
if (msg.value < depositFee + batchBridgeFee) {
revert ErrorInsufficientMsgValueForBatchDepositFee();
}
// take accumulated fee to fee vault
uint256 accumulatedFee;
if (token == address(0)) {
// no uncheck here just in case
accumulatedFee = address(this).balance - msg.value - cachedTokenState.pending;
} else {
// no uncheck here just in case
accumulatedFee = IERC20Upgradeable(token).balanceOf(address(this)) - cachedTokenState.pending;
}
if (accumulatedFee > 0) {
_transferToken(token, feeVault, accumulatedFee);
}
// deposit token to L2
BatchState memory cachedBatchState = batches[token][cachedTokenState.pendingBatchIndex];
address l2Token;
if (token == address(0)) {
IL1ScrollMessenger(messenger).sendMessage{value: cachedBatchState.amount + depositFee}(
counterpart,
cachedBatchState.amount,
new bytes(0),
cachedBatchConfig.safeBridgeGasLimit
);
} else {
address gateway = IL1GatewayRouter(router).getERC20Gateway(token);
l2Token = IL1ERC20Gateway(gateway).getL2ERC20Address(token);
IERC20Upgradeable(token).safeApprove(gateway, 0);
IERC20Upgradeable(token).safeApprove(gateway, cachedBatchState.amount);
IL1ERC20Gateway(gateway).depositERC20{value: depositFee}(
token,
counterpart,
cachedBatchState.amount,
cachedBatchConfig.safeBridgeGasLimit
);
}
// notify `L2BatchBridgeGateway`
IL1ScrollMessenger(messenger).sendMessage{value: batchBridgeFee}(
counterpart,
0,
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(token, l2Token, cachedTokenState.pendingBatchIndex, cachedBatchState.hash)
),
SAFE_BATCH_BRIDGE_GAS_LIMIT
);
emit BatchDeposit(_msgSender(), token, cachedTokenState.pendingBatchIndex, l2Token);
// update token state
unchecked {
cachedTokenState.pending -= uint128(cachedBatchState.amount);
cachedTokenState.pendingBatchIndex += 1;
}
tokens[token] = cachedTokenState;
// refund keeper fee
unchecked {
if (msg.value > depositFee + batchBridgeFee) {
_transferToken(address(0), _msgSender(), msg.value - depositFee - batchBridgeFee);
}
}
}
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to deposit token.
/// @param token The address of token to deposit.
/// @param sender The address of token sender.
/// @param amount The amount of token to deposit.
function _deposit(
address token,
address sender,
uint96 amount
) internal nonReentrant {
BatchConfig memory cachedBatchConfig = configs[token];
TokenState memory cachedTokenState = tokens[token];
_tryFinalizeCurrentBatch(token, cachedBatchConfig, cachedTokenState);
BatchState memory cachedBatchState = batches[token][cachedTokenState.currentBatchIndex];
if (amount < cachedBatchConfig.minAmountPerTx) {
revert ErrorDepositAmountTooSmall();
}
emit Deposit(sender, token, cachedTokenState.currentBatchIndex, amount, cachedBatchConfig.feeAmountPerTx);
// deduct fee and update cached state
unchecked {
amount -= cachedBatchConfig.feeAmountPerTx;
cachedTokenState.pending += amount;
cachedBatchState.amount += amount;
cachedBatchState.numDeposits += 1;
}
// compute the hash chain
bytes32 node = BatchBridgeCodec.encodeNode(sender, amount);
if (cachedBatchState.hash == bytes32(0)) {
bytes32 initialNode = BatchBridgeCodec.encodeInitialNode(token, cachedTokenState.currentBatchIndex);
// this is the first tx in this batch
cachedBatchState.hash = BatchBridgeCodec.hash(initialNode, node);
cachedBatchState.startTime = uint64(block.timestamp);
} else {
cachedBatchState.hash = BatchBridgeCodec.hash(cachedBatchState.hash, node);
}
batches[token][cachedTokenState.currentBatchIndex] = cachedBatchState;
tokens[token] = cachedTokenState;
}
/// @dev Internal function to finalize the current batch.
/// This function may modify `cachedTokenState`, which the caller can use in later operations.
/// @param token The address of token to finalize.
/// @param cachedBatchConfig The cached batch config in memory.
/// @param cachedTokenState The cached token state in memory.
function _tryFinalizeCurrentBatch(
address token,
BatchConfig memory cachedBatchConfig,
TokenState memory cachedTokenState
) internal view {
if (cachedBatchConfig.maxTxsPerBatch == 0) {
revert ErrorTokenNotSupported();
}
BatchState memory cachedBatchState = batches[token][cachedTokenState.currentBatchIndex];
// nothing to finalize if the current batch has no deposits yet
if (cachedBatchState.numDeposits == 0) return;
// finalize the current batch when `maxTxsPerBatch` or `maxDelayPerBatch` is reached.
if (
cachedBatchState.numDeposits == cachedBatchConfig.maxTxsPerBatch ||
block.timestamp - cachedBatchState.startTime > cachedBatchConfig.maxDelayPerBatch
) {
cachedTokenState.currentBatchIndex += 1;
}
}
/// @dev Internal function to transfer token, including ETH.
/// @param token The address of token.
/// @param receiver The address of token receiver.
/// @param amount The amount of token to transfer.
function _transferToken(
address token,
address receiver,
uint256 amount
) private {
if (token == address(0)) {
(bool success, ) = receiver.call{value: amount}("");
if (!success) revert ErrorTransferETHFailed();
} else {
IERC20Upgradeable(token).safeTransfer(receiver, amount);
}
}
}
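Note: `BatchBridgeCodec` is imported by both gateways but is not part of this compare view. Judging only from the call sites above (`encodeInitialNode(address, uint64)`, `encodeNode(address, uint96)`, `decodeNode(bytes32)`, `hash(bytes32, bytes32)`), a minimal sketch could look as follows; the packing layout and hashing scheme here are assumptions, not the repository's actual implementation:

// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;

/// @dev Hypothetical sketch of the codec shared by both gateways; the real
/// library may pack fields or hash differently.
library BatchBridgeCodec {
    /// @dev Seed of the hash chain for a given (token, batchIndex) pair.
    function encodeInitialNode(address token, uint64 batchIndex) internal pure returns (bytes32) {
        return bytes32((uint256(uint160(token)) << 96) | uint256(batchIndex));
    }

    /// @dev One deposit node: the 20-byte receiver in the high bits, the uint96 amount in the low 96 bits.
    function encodeNode(address receiver, uint96 amount) internal pure returns (bytes32) {
        return bytes32((uint256(uint160(receiver)) << 96) | uint256(amount));
    }

    /// @dev Inverse of `encodeNode`.
    function decodeNode(bytes32 node) internal pure returns (address receiver, uint256 amount) {
        receiver = address(uint160(uint256(node) >> 96));
        amount = uint256(node) & type(uint96).max;
    }

    /// @dev Extend the running hash chain by one node.
    function hash(bytes32 left, bytes32 right) internal pure returns (bytes32) {
        return keccak256(abi.encodePacked(left, right));
    }
}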

View File

@@ -0,0 +1,246 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {AccessControlEnumerableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/AccessControlEnumerableUpgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
import {IL2ScrollMessenger} from "../L2/IL2ScrollMessenger.sol";
import {BatchBridgeCodec} from "./BatchBridgeCodec.sol";
/// @title L2BatchBridgeGateway
contract L2BatchBridgeGateway is AccessControlEnumerableUpgradeable {
/**********
* Events *
**********/
/// @notice Emitted when the token mapping for an ERC20 token is updated.
/// @param l2Token The address of corresponding ERC20 token in layer 2.
/// @param oldL1Token The address of the old corresponding ERC20 token in layer 1.
/// @param newL1Token The address of the new corresponding ERC20 token in layer 1.
event UpdateTokenMapping(address indexed l2Token, address indexed oldL1Token, address indexed newL1Token);
/// @notice Emitted when a batch deposit is finalized.
/// @param l1Token The address of token in L1.
/// @param l2Token The address of token in L2.
/// @param batchIndex The index of batch finalized.
event FinalizeBatchDeposit(address indexed l1Token, address indexed l2Token, uint256 indexed batchIndex);
/// @notice Emitted when a batch distribution is finished.
/// @param l1Token The address of token in L1.
/// @param l2Token The address of token in L2.
/// @param batchIndex The index of batch distributed.
event BatchDistribute(address indexed l1Token, address indexed l2Token, uint256 indexed batchIndex);
/// @notice Emitted when a token distribution failed.
/// @param l2Token The address of token in L2.
/// @param batchIndex The index of the batch.
/// @param receiver The address of token receiver.
/// @param amount The amount of token to distribute.
event DistributeFailed(address indexed l2Token, uint256 indexed batchIndex, address receiver, uint256 amount);
/**********
* Errors *
**********/
/// @dev Thrown when caller is not `messenger`.
error ErrorCallerNotMessenger();
/// @dev Thrown when the given L1 token mismatches the stored token mapping in `finalizeBatchDeposit`.
error ErrorL1TokenMismatched();
/// @dev Thrown when message sender is not `counterpart`.
error ErrorMessageSenderNotCounterpart();
/// @dev Thrown when no failed distribution exists.
error ErrorNoFailedDistribution();
/// @dev Thrown when the batch hash mismatches.
error ErrorBatchHashMismatch();
/// @dev Thrown when the batch has already been distributed.
error ErrorBatchDistributed();
/*************
* Constants *
*************/
/// @notice The role for batch deposit keeper.
bytes32 public constant KEEPER_ROLE = keccak256("KEEPER_ROLE");
/// @notice The safe gas limit for ETH transfers.
uint256 private constant SAFE_ETH_TRANSFER_GAS_LIMIT = 50000;
/// @notice The address of corresponding `L1BatchBridgeGateway` contract.
address public immutable counterpart;
/// @notice The address of corresponding `L2ScrollMessenger` contract.
address public immutable messenger;
/*************
* Variables *
*************/
/// @notice Mapping from L2 token address to L1 token address.
mapping(address => address) public tokenMapping;
/// @notice Mapping from L2 token address to batch index to batch hash.
mapping(address => mapping(uint256 => bytes32)) public batchHashes;
/// @notice Mapping from L2 token address to the total amount of failed distributions.
mapping(address => uint256) public failedAmount;
/// @notice Mapping from batch hash to the distribution status.
mapping(bytes32 => bool) public isDistributed;
/*************
* Modifiers *
*************/
modifier onlyMessenger() {
if (_msgSender() != messenger) {
revert ErrorCallerNotMessenger();
}
_;
}
/***************
* Constructor *
***************/
/// @param _counterpart The address of `L1BatchBridgeGateway` contract in L1.
/// @param _messenger The address of `L2ScrollMessenger` contract in L2.
constructor(address _counterpart, address _messenger) {
_disableInitializers();
counterpart = _counterpart;
messenger = _messenger;
}
/// @notice Initialize the storage of `L2BatchBridgeGateway`.
function initialize() external initializer {
__Context_init(); // from ContextUpgradeable
__ERC165_init(); // from ERC165Upgradeable
__AccessControl_init(); // from AccessControlUpgradeable
__AccessControlEnumerable_init(); // from AccessControlEnumerableUpgradeable
_grantRole(DEFAULT_ADMIN_ROLE, _msgSender());
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Receive batch bridged ETH from `L2ScrollMessenger`.
receive() external payable onlyMessenger {
// empty
}
/// @notice Finalize an L1-initiated batch token deposit.
/// @param l1Token The address of the token in L1.
/// @param l2Token The address of the token in L2.
/// @param batchIndex The index of this batch bridge.
/// @param hash The hash of this batch.
function finalizeBatchDeposit(
address l1Token,
address l2Token,
uint256 batchIndex,
bytes32 hash
) external onlyMessenger {
if (counterpart != IL2ScrollMessenger(messenger).xDomainMessageSender()) {
revert ErrorMessageSenderNotCounterpart();
}
// trust the messenger and update `tokenMapping` on the first call
// another assumption is that this function never fails due to out of gas
address storedL1Token = tokenMapping[l2Token];
if (storedL1Token == address(0) && l1Token != address(0)) {
tokenMapping[l2Token] = l1Token;
} else if (storedL1Token != l1Token) {
// this usually won't happen, check just in case.
revert ErrorL1TokenMismatched();
}
batchHashes[l2Token][batchIndex] = hash;
emit FinalizeBatchDeposit(l1Token, l2Token, batchIndex);
}
/************************
* Restricted Functions *
************************/
/// @notice Withdraw tokens from failed distributions.
/// @param token The address of token to withdraw.
/// @param receiver The address of token receiver.
function withdrawFailedAmount(address token, address receiver) external onlyRole(DEFAULT_ADMIN_ROLE) {
uint256 amount = failedAmount[token];
if (amount == 0) revert ErrorNoFailedDistribution();
failedAmount[token] = 0;
_transferToken(token, receiver, amount);
}
/// @notice Distribute deposited tokens to the corresponding receivers.
/// @param l2Token The address of L2 token.
/// @param batchIndex The index of batch to distribute.
/// @param nodes The list of encoded L1 deposits.
function distribute(
address l2Token,
uint64 batchIndex,
bytes32[] memory nodes
) external onlyRole(KEEPER_ROLE) {
address l1Token = tokenMapping[l2Token];
bytes32 hash = BatchBridgeCodec.encodeInitialNode(l1Token, batchIndex);
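// recompute the hash chain from the supplied nodes; it must equal the hash
// relayed from `L1BatchBridgeGateway` for this batch, otherwise we revert below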
for (uint256 i = 0; i < nodes.length; i++) {
hash = BatchBridgeCodec.hash(hash, nodes[i]);
}
if (batchHashes[l2Token][batchIndex] != hash) {
revert ErrorBatchHashMismatch();
}
if (isDistributed[hash]) {
revert ErrorBatchDistributed();
}
isDistributed[hash] = true;
// perform the transfers and tolerate failures to avoid a denial-of-service attack
for (uint256 i = 0; i < nodes.length; i++) {
(address receiver, uint256 amount) = BatchBridgeCodec.decodeNode(nodes[i]);
if (!_transferToken(l2Token, receiver, amount)) {
failedAmount[l2Token] += amount;
emit DistributeFailed(l2Token, batchIndex, receiver, amount);
}
}
emit BatchDistribute(l1Token, l2Token, batchIndex);
}
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to transfer token, including ETH.
/// @param token The address of token.
/// @param receiver The address of token receiver.
/// @param amount The amount of token to transfer.
/// @return success Whether the transfer is successful.
function _transferToken(
address token,
address receiver,
uint256 amount
) private returns (bool success) {
if (token == address(0)) {
// We add a gas limit here to prevent denial of service by a malicious receiver.
(success, ) = receiver.call{value: amount, gas: SAFE_ETH_TRANSFER_GAS_LIMIT}("");
} else {
// We perform a low-level call here to bypass Solidity's return-data size check.
// Normally, supported tokens are chosen such that this call does not revert unless out of gas.
bytes memory returnData;
(success, returnData) = token.call(abi.encodeCall(IERC20Upgradeable.transfer, (receiver, amount)));
if (success && returnData.length > 0) {
success = abi.decode(returnData, (bool));
}
}
}
}
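As a usage illustration, a keeper holding the original ordered deposit list can rebuild the nodes and trigger distribution as below. This is a hypothetical free-standing helper, assuming the `L2BatchBridgeGateway` and `BatchBridgeCodec` imports above and that the caller already holds `KEEPER_ROLE`:

// Hypothetical keeper-side helper; name and location are illustrative only.
function distributeBatch(
    L2BatchBridgeGateway gateway,
    address l2Token,
    uint64 batchIndex,
    address[] memory receivers,
    uint96[] memory amounts
) {
    bytes32[] memory nodes = new bytes32[](receivers.length);
    for (uint256 i = 0; i < receivers.length; i++) {
        // nodes must be encoded in the same order as the original L1 deposits
        nodes[i] = BatchBridgeCodec.encodeNode(receivers[i], amounts[i]);
    }
    // reverts with ErrorBatchHashMismatch if the list diverges from L1's hash chain
    gateway.distribute(l2Token, batchIndex, nodes);
}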

View File

@@ -79,7 +79,7 @@ library BatchHeaderV0Codec {
/// @notice Get the number of L1 messages popped before this batch.
/// @param batchPtr The start memory offset of the batch header in memory.
/// @return _totalL1MessagePopped The the number of L1 messages popped before this batch.
/// @return _totalL1MessagePopped The number of L1 messages popped before this batch.
function getTotalL1MessagePopped(uint256 batchPtr) internal pure returns (uint256 _totalL1MessagePopped) {
assembly {
_totalL1MessagePopped := shr(192, mload(add(batchPtr, 17)))

View File

@@ -79,7 +79,7 @@ library BatchHeaderV1Codec {
/// @notice Get the number of L1 messages popped before this batch.
/// @param batchPtr The start memory offset of the batch header in memory.
/// @return _totalL1MessagePopped The the number of L1 messages popped before this batch.
/// @return _totalL1MessagePopped The number of L1 messages popped before this batch.
function getTotalL1MessagePopped(uint256 batchPtr) internal pure returns (uint256 _totalL1MessagePopped) {
assembly {
_totalL1MessagePopped := shr(192, mload(add(batchPtr, 17)))

View File

@@ -98,7 +98,7 @@ According to the Scroll documentation, `L1ScrollMessenger`:
This contract is central in the L2-to-L1 communication process since all messages from L2 that are verified by the zkevm proof are executed on behalf of this contract.
In case of a vulnerability in the `L1ScrollMessenger`, which allows the attacker to send arbitrary messages bypassing the the zkevm proof, an attacker can immediately drain tokens from the L1 bridge.
In case of a vulnerability in the `L1ScrollMessenger`, which allows the attacker to send arbitrary messages bypassing the zkevm proof, an attacker can immediately drain tokens from the L1 bridge.
The upgradeability of the `L1ScrollMessenger` creates additional risk: its implementation could be replaced with one containing malicious functionality. Such an attack reduces to the vulnerability above and could steal all tokens locked on the L1 bridge.

View File

@@ -0,0 +1,634 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {MockERC20} from "solmate/test/utils/mocks/MockERC20.sol";
import {ITransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";
import {Strings} from "@openzeppelin/contracts/utils/Strings.sol";
import {L1BatchBridgeGateway} from "../../batch-bridge/L1BatchBridgeGateway.sol";
import {L2BatchBridgeGateway} from "../../batch-bridge/L2BatchBridgeGateway.sol";
import {BatchBridgeCodec} from "../../batch-bridge/BatchBridgeCodec.sol";
import {IL1ERC20Gateway, L1CustomERC20Gateway} from "../../L1/gateways/L1CustomERC20Gateway.sol";
import {L1GatewayRouter} from "../../L1/gateways/L1GatewayRouter.sol";
import {IL2ERC20Gateway, L2CustomERC20Gateway} from "../../L2/gateways/L2CustomERC20Gateway.sol";
import {AddressAliasHelper} from "../../libraries/common/AddressAliasHelper.sol";
import {ScrollConstants} from "../../libraries/constants/ScrollConstants.sol";
import {L1GatewayTestBase} from "../L1GatewayTestBase.t.sol";
contract L1BatchBridgeGatewayTest is L1GatewayTestBase {
event Deposit(
address indexed sender,
address indexed token,
uint256 indexed batchIndex,
uint256 amount,
uint256 fee
);
event BatchDeposit(address indexed caller, address indexed l1Token, uint256 indexed batchIndex, address l2Token);
event DepositERC20(
address indexed _l1Token,
address indexed _l2Token,
address indexed _from,
address _to,
uint256 _amount,
bytes _data
);
uint24 private constant SAFE_BATCH_BRIDGE_GAS_LIMIT = 200000;
uint24 ETH_DEPOSIT_SAFE_GAS_LIMIT = 300000;
uint24 ERC20_DEPOSIT_SAFE_GAS_LIMIT = 200000;
uint256 private constant L2_GAS_PRICE = 10;
L1BatchBridgeGateway private batch;
L1CustomERC20Gateway private gateway;
L1GatewayRouter private router;
L2CustomERC20Gateway private counterpartGateway;
L2BatchBridgeGateway private counterpartBatch;
MockERC20 private l1Token;
MockERC20 private l2Token;
address private batchFeeVault;
function setUp() public {
__L1GatewayTestBase_setUp();
batchFeeVault = address(uint160(address(this)) - 2);
// Deploy tokens
l1Token = new MockERC20("Mock L1", "ML1", 18);
l2Token = new MockERC20("Mock L2", "ML2", 18);
// Deploy L2 contracts
counterpartGateway = new L2CustomERC20Gateway(address(1), address(1), address(1));
counterpartBatch = new L2BatchBridgeGateway(address(1), address(1));
// Deploy L1 contracts
router = L1GatewayRouter(_deployProxy(address(new L1GatewayRouter())));
gateway = L1CustomERC20Gateway(_deployProxy(address(0)));
batch = L1BatchBridgeGateway(payable(_deployProxy(address(0))));
// Initialize L1 contracts
admin.upgrade(
ITransparentUpgradeableProxy(address(gateway)),
address(new L1CustomERC20Gateway(address(counterpartGateway), address(router), address(l1Messenger)))
);
gateway.initialize(address(counterpartGateway), address(router), address(l1Messenger));
admin.upgrade(
ITransparentUpgradeableProxy(address(batch)),
address(
new L1BatchBridgeGateway(
address(counterpartBatch),
address(router),
address(l1Messenger),
address(messageQueue)
)
)
);
batch.initialize(batchFeeVault);
router.initialize(address(0), address(gateway));
messageQueue.setL2BaseFee(L2_GAS_PRICE);
// Prepare token balances
l1Token.mint(address(this), type(uint128).max);
gateway.updateTokenMapping(address(l1Token), address(l2Token));
hevm.warp(1000000);
}
function testInitialized() external {
assertBoolEq(true, batch.hasRole(bytes32(0), address(this)));
assertEq(address(counterpartBatch), batch.counterpart());
assertEq(address(router), batch.router());
assertEq(address(l1Messenger), batch.messenger());
assertEq(address(messageQueue), batch.queue());
hevm.expectRevert("Initializable: contract is already initialized");
batch.initialize(address(0));
}
function testSetTokenSetting() external {
// revert not admin
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
);
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 0, 0, 0));
hevm.stopPrank();
// revert maxTxsPerBatch = 0
hevm.expectRevert(L1BatchBridgeGateway.ErrorInvalidBatchConfig.selector);
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 0, 0, 0));
// revert maxDelayPerBatch = 0
hevm.expectRevert(L1BatchBridgeGateway.ErrorInvalidBatchConfig.selector);
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 1, 0, 0));
// revert feeAmountPerTx > minAmountPerTx
hevm.expectRevert(L1BatchBridgeGateway.ErrorInvalidBatchConfig.selector);
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(1, 0, 1, 1, 0));
// succeed
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(1, 2, 3, 4, 5));
(
uint96 feeAmountPerTx,
uint96 minAmountPerTx,
uint16 maxTxsPerBatch,
uint24 maxDelayPerBatch,
uint24 safeBridgeGasLimit
) = batch.configs(address(0));
assertEq(feeAmountPerTx, 1);
assertEq(minAmountPerTx, 2);
assertEq(maxTxsPerBatch, 3);
assertEq(maxDelayPerBatch, 4);
assertEq(safeBridgeGasLimit, 5);
}
function testSetTokenSettingFuzzing(address token, L1BatchBridgeGateway.BatchConfig memory config) external {
hevm.assume(config.maxTxsPerBatch > 0);
hevm.assume(config.maxDelayPerBatch > 0);
hevm.assume(config.feeAmountPerTx <= config.minAmountPerTx);
(
uint96 feeAmountPerTx,
uint96 minAmountPerTx,
uint16 maxTxsPerBatch,
uint24 maxDelayPerBatch,
uint24 safeBridgeGasLimit
) = batch.configs(token);
assertEq(feeAmountPerTx, 0);
assertEq(minAmountPerTx, 0);
assertEq(maxTxsPerBatch, 0);
assertEq(maxDelayPerBatch, 0);
assertEq(safeBridgeGasLimit, 0);
batch.setBatchConfig(token, config);
(feeAmountPerTx, minAmountPerTx, maxTxsPerBatch, maxDelayPerBatch, safeBridgeGasLimit) = batch.configs(token);
assertEq(feeAmountPerTx, config.feeAmountPerTx);
assertEq(minAmountPerTx, config.minAmountPerTx);
assertEq(maxTxsPerBatch, config.maxTxsPerBatch);
assertEq(maxDelayPerBatch, config.maxDelayPerBatch);
assertEq(safeBridgeGasLimit, config.safeBridgeGasLimit);
}
function checkBatchState(
address token,
uint256 phase,
L1BatchBridgeGateway.BatchState memory expected
) private {
(uint128 amount, uint64 startTime, uint64 numDeposits, bytes32 hash) = batch.batches(token, phase);
assertEq(amount, expected.amount);
assertEq(startTime, expected.startTime);
assertEq(numDeposits, expected.numDeposits);
// assertEq(hash, expected.hash);
}
function checkTokenState(address token, L1BatchBridgeGateway.TokenState memory expected) private {
(uint128 pending, uint64 currentBatchIndex, uint64 pendingBatchIndex) = batch.tokens(token);
assertEq(pending, expected.pending);
assertEq(currentBatchIndex, expected.currentBatchIndex);
assertEq(pendingBatchIndex, expected.pendingBatchIndex);
}
function testDepositETH() external {
// revert token not supported
hevm.expectRevert(L1BatchBridgeGateway.ErrorTokenNotSupported.selector);
batch.depositETH();
// revert deposit amount too small
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 100, 2, 100, ETH_DEPOSIT_SAFE_GAS_LIMIT));
hevm.expectRevert(L1BatchBridgeGateway.ErrorDepositAmountTooSmall.selector);
batch.depositETH{value: 10}();
// no fee
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 2, 100, ETH_DEPOSIT_SAFE_GAS_LIMIT));
assertEq(0, address(batch).balance);
checkBatchState(address(0), 0, L1BatchBridgeGateway.BatchState(0, 0, 0, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(0, 0, 0));
hevm.warp(1000001);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 0, 1000, 0);
batch.depositETH{value: 1000}();
assertEq(1000, address(batch).balance);
checkBatchState(address(0), 0, L1BatchBridgeGateway.BatchState(1000, 1000001, 1, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(1000, 0, 0));
hevm.warp(1000002);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 0, 2000, 0);
batch.depositETH{value: 2000}();
assertEq(3000, address(batch).balance);
checkBatchState(address(0), 0, L1BatchBridgeGateway.BatchState(3000, 1000001, 2, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(3000, 0, 0));
hevm.warp(1000003);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 1, 3000, 0);
batch.depositETH{value: 3000}();
assertEq(6000, address(batch).balance);
checkBatchState(address(0), 1, L1BatchBridgeGateway.BatchState(3000, 1000003, 1, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(6000, 1, 0));
// with fee
batch.setBatchConfig(
address(0),
L1BatchBridgeGateway.BatchConfig(100, 1000, 2, 100, ETH_DEPOSIT_SAFE_GAS_LIMIT)
);
hevm.warp(1000004);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 1, 1000, 100);
batch.depositETH{value: 1000}();
assertEq(7000, address(batch).balance);
checkBatchState(address(0), 1, L1BatchBridgeGateway.BatchState(3900, 1000003, 2, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(6900, 1, 0));
hevm.warp(1000005);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 2, 2000, 100);
batch.depositETH{value: 2000}();
assertEq(9000, address(batch).balance);
checkBatchState(address(0), 2, L1BatchBridgeGateway.BatchState(1900, 1000005, 1, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(8800, 2, 0));
hevm.warp(1000006);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 2, 3000, 100);
batch.depositETH{value: 3000}();
assertEq(12000, address(batch).balance);
checkBatchState(address(0), 2, L1BatchBridgeGateway.BatchState(4800, 1000005, 2, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(11700, 2, 0));
// switch phase by timestamp
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 100, 100, ETH_DEPOSIT_SAFE_GAS_LIMIT));
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 2, 1000, 0);
batch.depositETH{value: 1000}();
assertEq(13000, address(batch).balance);
checkBatchState(address(0), 2, L1BatchBridgeGateway.BatchState(5800, 1000005, 3, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(12700, 2, 0));
hevm.warp(1000005 + 100 + 1);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(0), 3, 1000, 0);
batch.depositETH{value: 1000}();
assertEq(14000, address(batch).balance);
checkBatchState(address(0), 3, L1BatchBridgeGateway.BatchState(1000, 1000005 + 100 + 1, 1, bytes32(0)));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(13700, 3, 0));
}
function testDepositERC20() external {
// revert token is zero
hevm.expectRevert(L1BatchBridgeGateway.ErrorIncorrectMethodForETHDeposit.selector);
batch.depositERC20(address(0), 0);
// revert token not supported
hevm.expectRevert(L1BatchBridgeGateway.ErrorTokenNotSupported.selector);
batch.depositERC20(address(l1Token), 0);
// revert deposit amount too small
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(0, 100, 2, 100, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
l1Token.approve(address(batch), 10);
hevm.expectRevert(L1BatchBridgeGateway.ErrorDepositAmountTooSmall.selector);
batch.depositERC20(address(l1Token), 10);
// no fee
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(0, 0, 2, 100, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
assertEq(0, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 0, L1BatchBridgeGateway.BatchState(0, 0, 0, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(0, 0, 0));
hevm.warp(1000001);
l1Token.approve(address(batch), 1000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 0, 1000, 0);
batch.depositERC20(address(l1Token), 1000);
assertEq(1000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 0, L1BatchBridgeGateway.BatchState(1000, 1000001, 1, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(1000, 0, 0));
hevm.warp(1000002);
l1Token.approve(address(batch), 2000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 0, 2000, 0);
batch.depositERC20(address(l1Token), 2000);
assertEq(3000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 0, L1BatchBridgeGateway.BatchState(3000, 1000001, 2, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(3000, 0, 0));
hevm.warp(1000003);
l1Token.approve(address(batch), 3000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 1, 3000, 0);
batch.depositERC20(address(l1Token), 3000);
assertEq(6000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 1, L1BatchBridgeGateway.BatchState(3000, 1000003, 1, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(6000, 1, 0));
// with fee
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(100, 1000, 2, 100, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
hevm.warp(1000004);
l1Token.approve(address(batch), 1000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 1, 1000, 100);
batch.depositERC20(address(l1Token), 1000);
assertEq(7000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 1, L1BatchBridgeGateway.BatchState(3900, 1000003, 2, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(6900, 1, 0));
hevm.warp(1000005);
l1Token.approve(address(batch), 2000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 2, 2000, 100);
batch.depositERC20(address(l1Token), 2000);
assertEq(9000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 2, L1BatchBridgeGateway.BatchState(1900, 1000005, 1, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(8800, 2, 0));
hevm.warp(1000006);
l1Token.approve(address(batch), 3000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 2, 3000, 100);
batch.depositERC20(address(l1Token), 3000);
assertEq(12000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 2, L1BatchBridgeGateway.BatchState(4800, 1000005, 2, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(11700, 2, 0));
// switch phase by timestamp
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(0, 0, 100, 100, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
l1Token.approve(address(batch), 1000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 2, 1000, 0);
batch.depositERC20(address(l1Token), 1000);
assertEq(13000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 2, L1BatchBridgeGateway.BatchState(5800, 1000005, 3, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(12700, 2, 0));
hevm.warp(1000005 + 100 + 1);
l1Token.approve(address(batch), 1000);
hevm.expectEmit(true, true, true, true);
emit Deposit(address(this), address(l1Token), 3, 1000, 0);
batch.depositERC20(address(l1Token), 1000);
assertEq(14000, l1Token.balanceOf(address(batch)));
checkBatchState(address(l1Token), 3, L1BatchBridgeGateway.BatchState(1000, 1000005 + 100 + 1, 1, bytes32(0)));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(13700, 3, 0));
}
function testBatchBridgeFailure() external {
// revert not keeper
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0xfc8737ab85eb45125971625a9ebdb75cc78e01d5c1fa80c4c6e5203f47bc4fab"
);
batch.executeBatchDeposit(address(0));
hevm.stopPrank();
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// revert token not supported
hevm.expectRevert(L1BatchBridgeGateway.ErrorTokenNotSupported.selector);
batch.executeBatchDeposit(address(0));
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 1, 1, ETH_DEPOSIT_SAFE_GAS_LIMIT));
// revert no pending
hevm.expectRevert(L1BatchBridgeGateway.ErrorNoPendingBatch.selector);
batch.executeBatchDeposit(address(0));
// revert insufficient msg.value
batch.depositETH{value: 1000}();
hevm.expectRevert(L1BatchBridgeGateway.ErrorInsufficientMsgValueForBatchDepositFee.selector);
batch.executeBatchDeposit(address(0));
hevm.expectRevert(L1BatchBridgeGateway.ErrorInsufficientMsgValueForBatchDepositFee.selector);
batch.executeBatchDeposit{value: L2_GAS_PRICE * ETH_DEPOSIT_SAFE_GAS_LIMIT}(address(0));
hevm.expectRevert(L1BatchBridgeGateway.ErrorInsufficientMsgValueForBatchDepositFee.selector);
batch.executeBatchDeposit{value: L2_GAS_PRICE * (SAFE_BATCH_BRIDGE_GAS_LIMIT + ETH_DEPOSIT_SAFE_GAS_LIMIT) - 1}(
address(0)
);
// succeed
batch.executeBatchDeposit{value: L2_GAS_PRICE * (SAFE_BATCH_BRIDGE_GAS_LIMIT + ETH_DEPOSIT_SAFE_GAS_LIMIT)}(
address(0)
);
}
function testBatchBridgeETH() external {
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// no deposit fee
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(0, 0, 1, 1, ETH_DEPOSIT_SAFE_GAS_LIMIT));
batch.depositETH{value: 1000}();
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(1000, 0, 0));
// emit SentMessage by deposit ETH
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(batch), address(counterpartBatch), 1000, 0, ETH_DEPOSIT_SAFE_GAS_LIMIT, "");
// emit SentMessage by batchBridge
hevm.expectEmit(true, true, false, true);
emit SentMessage(
address(batch),
address(counterpartBatch),
0,
1,
SAFE_BATCH_BRIDGE_GAS_LIMIT,
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(
address(0),
address(0),
0,
BatchBridgeCodec.hash(
BatchBridgeCodec.encodeInitialNode(address(0), 0),
BatchBridgeCodec.encodeNode(address(this), 1000)
)
)
)
);
// emit BatchDeposit
hevm.expectEmit(true, true, true, true);
emit BatchDeposit(address(this), address(0), 0, address(0));
uint256 batchFeeVaultBefore = batchFeeVault.balance;
uint256 messengerBefore = address(l1Messenger).balance;
batch.executeBatchDeposit{value: 1 ether}(address(0));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(0, 1, 1));
assertEq(batchFeeVaultBefore, batchFeeVault.balance);
assertEq(messengerBefore + 1000, address(l1Messenger).balance);
// has deposit fee = 100
batch.setBatchConfig(address(0), L1BatchBridgeGateway.BatchConfig(100, 1000, 1, 1, ETH_DEPOSIT_SAFE_GAS_LIMIT));
batch.depositETH{value: 1000}();
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(900, 1, 1));
// emit SentMessage by deposit ETH
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(batch), address(counterpartBatch), 900, 2, ETH_DEPOSIT_SAFE_GAS_LIMIT, "");
// emit SentMessage by batchBridge
hevm.expectEmit(true, true, false, true);
emit SentMessage(
address(batch),
address(counterpartBatch),
0,
3,
SAFE_BATCH_BRIDGE_GAS_LIMIT,
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(
address(0),
address(0),
1,
BatchBridgeCodec.hash(
BatchBridgeCodec.encodeInitialNode(address(0), 1),
BatchBridgeCodec.encodeNode(address(this), 900)
)
)
)
);
// emit BatchDeposit
hevm.expectEmit(true, true, true, true);
emit BatchDeposit(address(this), address(0), 1, address(0));
batchFeeVaultBefore = batchFeeVault.balance;
messengerBefore = address(l1Messenger).balance;
batch.executeBatchDeposit{value: 1 ether}(address(0));
checkTokenState(address(0), L1BatchBridgeGateway.TokenState(0, 2, 2));
assertEq(batchFeeVaultBefore + 100, batchFeeVault.balance);
assertEq(messengerBefore + 900, address(l1Messenger).balance);
}
function testBatchBridgeERC20() external {
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// no deposit fee
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(0, 0, 1, 1, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
l1Token.approve(address(batch), 1000);
batch.depositERC20(address(l1Token), 1000);
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(1000, 0, 0));
bytes memory message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector,
address(l1Token),
address(l2Token),
address(batch),
address(counterpartBatch),
1000,
new bytes(0)
);
// emit SentMessage by deposit ERC20
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(gateway), address(counterpartGateway), 0, 0, ERC20_DEPOSIT_SAFE_GAS_LIMIT, message);
// emit SentMessage by batchBridge
hevm.expectEmit(true, true, false, true);
emit SentMessage(
address(batch),
address(counterpartBatch),
0,
1,
SAFE_BATCH_BRIDGE_GAS_LIMIT,
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(
address(l1Token),
address(l2Token),
0,
BatchBridgeCodec.hash(
BatchBridgeCodec.encodeInitialNode(address(l1Token), 0),
BatchBridgeCodec.encodeNode(address(this), 1000)
)
)
)
);
// emit BatchDeposit
hevm.expectEmit(true, true, true, true);
emit BatchDeposit(address(this), address(l1Token), 0, address(l2Token));
uint256 batchFeeVaultBefore = l1Token.balanceOf(batchFeeVault);
uint256 gatewayBefore = l1Token.balanceOf(address(gateway));
batch.executeBatchDeposit{value: 1 ether}(address(l1Token));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(0, 1, 1));
assertEq(batchFeeVaultBefore, l1Token.balanceOf(batchFeeVault));
assertEq(gatewayBefore + 1000, l1Token.balanceOf(address(gateway)));
// has deposit fee = 100
batch.setBatchConfig(
address(l1Token),
L1BatchBridgeGateway.BatchConfig(100, 1000, 1, 1, ERC20_DEPOSIT_SAFE_GAS_LIMIT)
);
l1Token.approve(address(batch), 1000);
batch.depositERC20(address(l1Token), 1000);
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(900, 1, 1));
message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector,
address(l1Token),
address(l2Token),
address(batch),
address(counterpartBatch),
900,
new bytes(0)
);
// emit SentMessage by deposit ERC20
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(gateway), address(counterpartGateway), 0, 2, ERC20_DEPOSIT_SAFE_GAS_LIMIT, message);
// emit SentMessage by batchBridge
hevm.expectEmit(true, true, false, true);
emit SentMessage(
address(batch),
address(counterpartBatch),
0,
3,
SAFE_BATCH_BRIDGE_GAS_LIMIT,
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(
address(l1Token),
address(l2Token),
1,
BatchBridgeCodec.hash(
BatchBridgeCodec.encodeInitialNode(address(l1Token), 1),
BatchBridgeCodec.encodeNode(address(this), 900)
)
)
)
);
// emit BatchDeposit
hevm.expectEmit(true, true, true, true);
emit BatchDeposit(address(this), address(l1Token), 1, address(l2Token));
batchFeeVaultBefore = l1Token.balanceOf(batchFeeVault);
gatewayBefore = l1Token.balanceOf(address(gateway));
batch.executeBatchDeposit{value: 1 ether}(address(l1Token));
checkTokenState(address(l1Token), L1BatchBridgeGateway.TokenState(0, 2, 2));
assertEq(batchFeeVaultBefore + 100, l1Token.balanceOf(batchFeeVault));
assertEq(gatewayBefore + 900, l1Token.balanceOf(address(gateway)));
}
}

View File

@@ -0,0 +1,454 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {MockERC20} from "solmate/test/utils/mocks/MockERC20.sol";
import {ITransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";
import {Strings} from "@openzeppelin/contracts/utils/Strings.sol";
import {L1BatchBridgeGateway} from "../../batch-bridge/L1BatchBridgeGateway.sol";
import {L2BatchBridgeGateway} from "../../batch-bridge/L2BatchBridgeGateway.sol";
import {BatchBridgeCodec} from "../../batch-bridge/BatchBridgeCodec.sol";
import {RevertOnTransferToken} from "../mocks/tokens/RevertOnTransferToken.sol";
import {MockScrollMessenger} from "../mocks/MockScrollMessenger.sol";
import {ScrollTestBase} from "../ScrollTestBase.t.sol";
contract L2BatchBridgeGatewayTest is ScrollTestBase {
event UpdateTokenMapping(address indexed l2Token, address indexed oldL1Token, address indexed newL1Token);
event FinalizeBatchDeposit(address indexed l1Token, address indexed l2Token, uint256 indexed batchIndex);
event BatchDistribute(address indexed l1Token, address indexed l2Token, uint256 indexed batchIndex);
event DistributeFailed(address indexed l2Token, uint256 indexed batchIndex, address receiver, uint256 amount);
L1BatchBridgeGateway private counterpartBatch;
L2BatchBridgeGateway private batch;
MockScrollMessenger messenger;
MockERC20 private l1Token;
MockERC20 private l2Token;
RevertOnTransferToken private maliciousL2Token;
bool revertOnReceive;
bool loopOnReceive;
// two safe EOAs to receive ETH
address private recipient1;
address private recipient2;
receive() external payable {
if (revertOnReceive) revert();
if (loopOnReceive) {
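// burn gas with repeated storage writes so a gas-capped ETH transfer runs out of gas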
for (uint256 i = 0; i < 1000000000; i++) {
recipient1 = address(uint160(address(this)) - 1);
}
}
}
function setUp() public {
__ScrollTestBase_setUp();
recipient1 = address(uint160(address(this)) - 1);
recipient2 = address(uint160(address(this)) - 2);
// Deploy tokens
l1Token = new MockERC20("Mock L1", "ML1", 18);
l2Token = new MockERC20("Mock L2", "ML2", 18);
maliciousL2Token = new RevertOnTransferToken("X", "Y", 18);
messenger = new MockScrollMessenger();
counterpartBatch = new L1BatchBridgeGateway(address(1), address(1), address(1), address(1));
batch = L2BatchBridgeGateway(payable(_deployProxy(address(0))));
// Initialize L2 contracts
admin.upgrade(
ITransparentUpgradeableProxy(address(batch)),
address(new L2BatchBridgeGateway(address(counterpartBatch), address(messenger)))
);
batch.initialize();
}
function testInitialized() external {
assertBoolEq(true, batch.hasRole(bytes32(0), address(this)));
assertEq(address(counterpartBatch), batch.counterpart());
assertEq(address(messenger), batch.messenger());
hevm.expectRevert("Initializable: contract is already initialized");
batch.initialize();
}
function testFinalizeBatchDeposit() external {
// revert caller not messenger
hevm.expectRevert(L2BatchBridgeGateway.ErrorCallerNotMessenger.selector);
batch.finalizeBatchDeposit(address(0), address(0), 0, bytes32(0));
// revert xDomainMessageSender not counterpart
hevm.expectRevert(L2BatchBridgeGateway.ErrorMessageSenderNotCounterpart.selector);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(l2Token), 0, bytes32(0))
)
);
messenger.setXDomainMessageSender(address(counterpartBatch));
// emit FinalizeBatchDeposit
assertEq(address(0), batch.tokenMapping(address(l2Token)));
hevm.expectEmit(true, true, true, true);
emit FinalizeBatchDeposit(address(l1Token), address(l2Token), 1);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(l2Token), 1, bytes32(uint256(1)))
)
);
assertEq(address(l1Token), batch.tokenMapping(address(l2Token)));
assertEq(batch.batchHashes(address(l2Token), 1), bytes32(uint256(1)));
// revert token not match
hevm.expectRevert(L2BatchBridgeGateway.ErrorL1TokenMismatched.selector);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (address(0), address(l2Token), 0, bytes32(0)))
);
}
function testFinalizeBatchDepositFuzzing(
address token1,
address token2,
uint256 batchIndex,
bytes32 hash
) external {
messenger.setXDomainMessageSender(address(counterpartBatch));
assertEq(address(0), batch.tokenMapping(token2));
hevm.expectEmit(true, true, true, true);
emit FinalizeBatchDeposit(token1, token2, batchIndex);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (token1, token2, batchIndex, hash))
);
assertEq(token1, batch.tokenMapping(token2));
assertEq(batch.batchHashes(token2, batchIndex), hash);
}
function testDistributeETH() external {
// revert not keeper
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0xfc8737ab85eb45125971625a9ebdb75cc78e01d5c1fa80c4c6e5203f47bc4fab"
);
batch.distribute(address(0), 0, new bytes32[](0));
hevm.stopPrank();
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// revert ErrorBatchHashMismatch
hevm.expectRevert(L2BatchBridgeGateway.ErrorBatchHashMismatch.selector);
batch.distribute(address(0), 1, new bytes32[](0));
// send some ETH to `L2BatchBridgeGateway`.
messenger.setXDomainMessageSender(address(counterpartBatch));
messenger.callTarget{value: 1 ether}(address(batch), "");
address[] memory receivers = new address[](2);
uint256[] memory amounts = new uint256[](2);
receivers[0] = recipient1;
receivers[1] = recipient2;
amounts[0] = 100;
amounts[1] = 200;
// all success
(bytes32[] memory nodes, bytes32 batchHash) = _encodeNodes(address(0), 0, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (address(0), address(0), 0, batchHash))
);
assertEq(0, recipient1.balance);
assertEq(0, recipient2.balance);
uint256 batchBalanceBefore = address(batch).balance;
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(0), address(0), 0);
batch.distribute(address(0), 0, nodes);
assertEq(100, recipient1.balance);
assertEq(200, recipient2.balance);
assertEq(batchBalanceBefore - 300, address(batch).balance);
assertBoolEq(true, batch.isDistributed(batchHash));
// revert ErrorBatchDistributed
hevm.expectRevert(L2BatchBridgeGateway.ErrorBatchDistributed.selector);
batch.distribute(address(0), 0, nodes);
// all failed due to revert
revertOnReceive = true;
loopOnReceive = false;
receivers[0] = address(this);
receivers[1] = address(this);
(nodes, batchHash) = _encodeNodes(address(0), 1, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (address(0), address(0), 1, batchHash))
);
uint256 thisBalanceBefore = address(this).balance;
batchBalanceBefore = address(batch).balance;
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(0), 1, address(this), 100);
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(0), 1, address(this), 200);
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(0), address(0), 1);
batch.distribute(address(0), 1, nodes);
assertEq(batchBalanceBefore, address(batch).balance);
assertEq(thisBalanceBefore, address(this).balance);
assertBoolEq(true, batch.isDistributed(batchHash));
assertEq(300, batch.failedAmount(address(0)));
// all failed due to out of gas
revertOnReceive = false;
loopOnReceive = true;
(nodes, batchHash) = _encodeNodes(address(0), 2, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (address(0), address(0), 2, batchHash))
);
thisBalanceBefore = address(this).balance;
batchBalanceBefore = address(batch).balance;
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(0), 2, address(this), 100);
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(0), 2, address(this), 200);
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(0), address(0), 2);
batch.distribute(address(0), 2, nodes);
assertEq(batchBalanceBefore, address(batch).balance);
assertEq(thisBalanceBefore, address(this).balance);
assertBoolEq(true, batch.isDistributed(batchHash));
assertEq(600, batch.failedAmount(address(0)));
}
function testDistributeERC20() external {
// revert not keeper
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0xfc8737ab85eb45125971625a9ebdb75cc78e01d5c1fa80c4c6e5203f47bc4fab"
);
batch.distribute(address(l2Token), 0, new bytes32[](0));
hevm.stopPrank();
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// revert ErrorBatchHashMismatch
hevm.expectRevert(L2BatchBridgeGateway.ErrorBatchHashMismatch.selector);
batch.distribute(address(l2Token), 1, new bytes32[](0));
// mint some ERC20 to `L2BatchBridgeGateway`.
messenger.setXDomainMessageSender(address(counterpartBatch));
l2Token.mint(address(batch), 1 ether);
address[] memory receivers = new address[](2);
uint256[] memory amounts = new uint256[](2);
receivers[0] = recipient1;
receivers[1] = recipient2;
amounts[0] = 100;
amounts[1] = 200;
// all success
(bytes32[] memory nodes, bytes32 batchHash) = _encodeNodes(address(l1Token), 0, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(l2Token), 0, batchHash)
)
);
assertEq(0, recipient1.balance);
assertEq(0, recipient2.balance);
uint256 batchBalanceBefore = l2Token.balanceOf(address(batch));
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(l1Token), address(l2Token), 0);
batch.distribute(address(l2Token), 0, nodes);
assertEq(100, l2Token.balanceOf(recipient1));
assertEq(200, l2Token.balanceOf(recipient2));
assertEq(batchBalanceBefore - 300, l2Token.balanceOf(address(batch)));
assertBoolEq(true, batch.isDistributed(batchHash));
// revert ErrorBatchDistributed
hevm.expectRevert(L2BatchBridgeGateway.ErrorBatchDistributed.selector);
batch.distribute(address(l2Token), 0, nodes);
maliciousL2Token.mint(address(batch), 1 ether);
// all failed due to revert
maliciousL2Token.setRevertOnTransfer(true);
receivers[0] = address(this);
receivers[1] = address(this);
(nodes, batchHash) = _encodeNodes(address(l1Token), 1, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(maliciousL2Token), 1, batchHash)
)
);
uint256 thisBalanceBefore = maliciousL2Token.balanceOf(address(this));
batchBalanceBefore = maliciousL2Token.balanceOf(address(batch));
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(maliciousL2Token), 1, address(this), 100);
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(maliciousL2Token), 1, address(this), 200);
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(l1Token), address(maliciousL2Token), 1);
batch.distribute(address(maliciousL2Token), 1, nodes);
assertEq(batchBalanceBefore, maliciousL2Token.balanceOf(address(batch)));
assertEq(thisBalanceBefore, maliciousL2Token.balanceOf(address(this)));
assertBoolEq(true, batch.isDistributed(batchHash));
assertEq(300, batch.failedAmount(address(maliciousL2Token)));
// all failed because `transfer` returned false
maliciousL2Token.setRevertOnTransfer(false);
maliciousL2Token.setTransferReturn(false);
(nodes, batchHash) = _encodeNodes(address(l1Token), 2, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(maliciousL2Token), 2, batchHash)
)
);
thisBalanceBefore = maliciousL2Token.balanceOf(address(this));
batchBalanceBefore = maliciousL2Token.balanceOf(address(batch));
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(maliciousL2Token), 2, address(this), 100);
hevm.expectEmit(true, true, false, true);
emit DistributeFailed(address(maliciousL2Token), 2, address(this), 200);
hevm.expectEmit(true, true, true, true);
emit BatchDistribute(address(l1Token), address(maliciousL2Token), 2);
batch.distribute(address(maliciousL2Token), 2, nodes);
assertEq(batchBalanceBefore, maliciousL2Token.balanceOf(address(batch)));
assertEq(thisBalanceBefore, maliciousL2Token.balanceOf(address(this)));
assertBoolEq(true, batch.isDistributed(batchHash));
assertEq(600, batch.failedAmount(address(maliciousL2Token)));
}
function testWithdrawFailedAmountETH() external {
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// revert not admin
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
);
batch.withdrawFailedAmount(address(0), address(this));
hevm.stopPrank();
// revert no failed
hevm.expectRevert(L2BatchBridgeGateway.ErrorNoFailedDistribution.selector);
batch.withdrawFailedAmount(address(0), address(this));
// send some ETH to `L2BatchBridgeGateway`.
messenger.setXDomainMessageSender(address(counterpartBatch));
messenger.callTarget{value: 1 ether}(address(batch), "");
// make a failed distribution
address[] memory receivers = new address[](2);
uint256[] memory amounts = new uint256[](2);
receivers[0] = address(this);
receivers[1] = address(this);
amounts[0] = 100;
amounts[1] = 200;
revertOnReceive = true;
(bytes32[] memory nodes, bytes32 batchHash) = _encodeNodes(address(0), 1, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(L2BatchBridgeGateway.finalizeBatchDeposit, (address(0), address(0), 1, batchHash))
);
assertEq(0, batch.failedAmount(address(0)));
batch.distribute(address(0), 1, nodes);
assertEq(300, batch.failedAmount(address(0)));
// withdraw failed
uint256 thisBalance = recipient1.balance;
uint256 batchBalance = address(batch).balance;
batch.withdrawFailedAmount(address(0), recipient1);
assertEq(0, batch.failedAmount(address(0)));
assertEq(thisBalance + 300, recipient1.balance);
assertEq(batchBalance - 300, address(batch).balance);
// revert no failed
hevm.expectRevert(L2BatchBridgeGateway.ErrorNoFailedDistribution.selector);
batch.withdrawFailedAmount(address(0), recipient1);
}
function testWithdrawFailedAmountERC20() external {
batch.grantRole(batch.KEEPER_ROLE(), address(this));
// revert not admin
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
);
batch.withdrawFailedAmount(address(0), address(this));
hevm.stopPrank();
// revert no failed
hevm.expectRevert(L2BatchBridgeGateway.ErrorNoFailedDistribution.selector);
batch.withdrawFailedAmount(address(0), address(this));
// mint some tokens to `L2BatchBridgeGateway`.
messenger.setXDomainMessageSender(address(counterpartBatch));
maliciousL2Token.mint(address(batch), 1 ether);
// make a failed distribution
address[] memory receivers = new address[](2);
uint256[] memory amounts = new uint256[](2);
receivers[0] = address(this);
receivers[1] = address(this);
amounts[0] = 100;
amounts[1] = 200;
maliciousL2Token.setRevertOnTransfer(true);
(bytes32[] memory nodes, bytes32 batchHash) = _encodeNodes(address(l1Token), 1, receivers, amounts);
messenger.callTarget(
address(batch),
abi.encodeCall(
L2BatchBridgeGateway.finalizeBatchDeposit,
(address(l1Token), address(maliciousL2Token), 1, batchHash)
)
);
assertEq(0, batch.failedAmount(address(maliciousL2Token)));
batch.distribute(address(maliciousL2Token), 1, nodes);
assertEq(300, batch.failedAmount(address(maliciousL2Token)));
// withdraw failed
maliciousL2Token.setRevertOnTransfer(false);
maliciousL2Token.setTransferReturn(true);
uint256 thisBalance = maliciousL2Token.balanceOf(recipient1);
uint256 batchBalance = maliciousL2Token.balanceOf(address(batch));
batch.withdrawFailedAmount(address(maliciousL2Token), recipient1);
assertEq(0, batch.failedAmount(address(maliciousL2Token)));
assertEq(thisBalance + 300, maliciousL2Token.balanceOf(recipient1));
assertEq(batchBalance - 300, maliciousL2Token.balanceOf(address(batch)));
// revert no failed
hevm.expectRevert(L2BatchBridgeGateway.ErrorNoFailedDistribution.selector);
batch.withdrawFailedAmount(address(maliciousL2Token), recipient1);
}
function _encodeNodes(
address token,
uint256 batchIndex,
address[] memory receivers,
uint256[] memory amounts
) private returns (bytes32[] memory nodes, bytes32 hash) {
nodes = new bytes32[](receivers.length);
hash = BatchBridgeCodec.encodeInitialNode(token, uint64(batchIndex));
for (uint256 i = 0; i < receivers.length; i++) {
nodes[i] = BatchBridgeCodec.encodeNode(receivers[i], uint96(amounts[i]));
hash = BatchBridgeCodec.hash(hash, nodes[i]);
}
}
}
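These tests depend on `MockScrollMessenger`, which is not included in this compare view. From its usage here (`setXDomainMessageSender`, a payable `callTarget`, and the `xDomainMessageSender()` read in `finalizeBatchDeposit`), a minimal hypothetical sketch would be:

// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;

/// @dev Hypothetical test double; the repository's actual mock may differ.
contract MockScrollMessenger {
    address public xDomainMessageSender;

    function setXDomainMessageSender(address sender) external {
        xDomainMessageSender = sender;
    }

    /// @dev Forward an arbitrary call (plus any ETH) to `target`, as the real messenger would.
    function callTarget(address target, bytes calldata data) external payable {
        (bool success, bytes memory returnData) = target.call{value: msg.value}(data);
        if (!success) {
            // bubble up the revert data so `hevm.expectRevert` sees the original error
            assembly {
                revert(add(returnData, 0x20), mload(returnData))
            }
        }
    }
}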

View File

@@ -0,0 +1,45 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {MockERC20} from "solmate/test/utils/mocks/MockERC20.sol";
// solhint-disable no-empty-blocks
contract RevertOnTransferToken is MockERC20 {
bool private revertOnTransfer;
bool private transferReturn;
constructor(
string memory _name,
string memory _symbol,
uint8 _decimals
) MockERC20(_name, _symbol, _decimals) {
transferReturn = true;
}
function setRevertOnTransfer(bool _revertOnTransfer) external payable {
revertOnTransfer = _revertOnTransfer;
}
function setTransferReturn(bool _transferReturn) external payable {
transferReturn = _transferReturn;
}
function transfer(address to, uint256 amount) public virtual override returns (bool) {
if (revertOnTransfer) revert();
if (!transferReturn) return false;
balanceOf[msg.sender] -= amount;
// Cannot overflow because the sum of all user
// balances can't exceed the max uint256 value.
unchecked {
balanceOf[to] += amount;
}
emit Transfer(msg.sender, to, amount);
return true;
}
}

View File

@@ -38,7 +38,7 @@ make lint
## Configure
The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManager` in [`config/config.go`](config/config.go) for more details.
The coordinator behavior can be configured using [`conf/config.json`](conf/config.json). Check the code comments under `ProverManager` in [`internal/config/config.go`](internal/config/config.go) for more details.
## Start

View File

@@ -1,6 +1,6 @@
module scroll-tech/coordinator
go 1.20
go 1.21
require (
github.com/appleboy/gin-jwt/v2 v2.9.1

View File

@@ -59,6 +59,7 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
@@ -95,6 +96,7 @@ github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -117,11 +119,13 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
@@ -144,6 +148,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -153,6 +158,7 @@ github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
@@ -164,6 +170,7 @@ github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -197,6 +204,7 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0=
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=

View File

@@ -30,8 +30,8 @@ func (c *Collector) cleanupChallenge() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopCleanChallengeChan:
log.Info("the coordinator cleanupChallenge run loop exit")
return
}
}

View File

@@ -23,7 +23,10 @@ type Collector struct {
db *gorm.DB
ctx context.Context
stopTimeoutChan chan struct{}
stopChunkTimeoutChan chan struct{}
stopBatchTimeoutChan chan struct{}
stopBatchAllChunkReadyChan chan struct{}
stopCleanChallengeChan chan struct{}
proverTaskOrm *orm.ProverTask
chunkOrm *orm.Chunk
@@ -40,14 +43,17 @@ type Collector struct {
// NewCollector creates a collector that periodically collects data to send to the provers
func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prometheus.Registerer) *Collector {
c := &Collector{
cfg: cfg,
db: db,
ctx: ctx,
stopTimeoutChan: make(chan struct{}),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
challenge: orm.NewChallenge(db),
cfg: cfg,
db: db,
ctx: ctx,
stopChunkTimeoutChan: make(chan struct{}),
stopBatchTimeoutChan: make(chan struct{}),
stopBatchAllChunkReadyChan: make(chan struct{}),
stopCleanChallengeChan: make(chan struct{}),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
challenge: orm.NewChallenge(db),
timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_timeout_checker_run_total",
@@ -83,10 +89,13 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
// Stop stops all the collector loops
func (c *Collector) Stop() {
c.stopTimeoutChan <- struct{}{}
c.stopChunkTimeoutChan <- struct{}{}
c.stopBatchTimeoutChan <- struct{}{}
c.stopBatchAllChunkReadyChan <- struct{}{}
c.stopCleanChallengeChan <- struct{}{}
}
// timeoutTask cron check the send task is timeout. if timeout reached, restore the
// timeoutBatchProofTask periodically checks whether an assigned task has timed out. If so, it restores the
// chunk/batch task to unassigned so the batch/chunk collector can retry it.
func (c *Collector) timeoutBatchProofTask() {
defer func() {
@@ -113,8 +122,8 @@ func (c *Collector) timeoutBatchProofTask() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopBatchTimeoutChan:
log.Info("the coordinator timeoutBatchProofTask run loop exit")
return
}
}
@@ -146,8 +155,8 @@ func (c *Collector) timeoutChunkProofTask() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopChunkTimeoutChan:
log.Info("the coordinator timeoutChunkProofTask run loop exit")
return
}
}
@@ -253,8 +262,8 @@ func (c *Collector) checkBatchAllChunkReady() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopBatchAllChunkReadyChan:
log.Info("the coordinator checkBatchAllChunkReady run loop exit")
return
}
}

View File

@@ -21,5 +21,5 @@ func NewLoginLogic(db *gorm.DB) *LoginLogic {
// InsertChallengeString inserts the challenge string, checking whether it already exists
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.challengeOrm.InsertChallenge(ctx, challenge)
return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
}

View File

@@ -83,7 +83,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
var endChunkIndex uint64 = math.MaxInt64
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, fromBlockNum)
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
@@ -94,7 +94,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
startChunkIndex = startChunk.Index
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, toBlockNum)
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
@@ -112,7 +112,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBatchTask *orm.Batch
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx, startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -121,7 +121,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why fetch again? To allow one task to be assigned to multiple provers, batches in `ProvingTaskAssigned`
// status must also be handed out. But `proving_status in (1, 2)` would not use the postgres index, so the sql is split in two.
if tmpBatchTask == nil {
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx, startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -133,7 +133,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx, tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
@@ -168,13 +168,13 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx, &proverTask)
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
@@ -242,7 +242,7 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *orm.Batch) {
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx, batchTask.Hash); err != nil {
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err)
}
}

View File

@@ -85,7 +85,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpChunkTask *orm.Chunk
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx, fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -94,7 +94,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why fetch again? To allow one task to be assigned to multiple provers, chunks in `ProvingTaskAssigned`
// status must also be handed out. But `proving_status in (1, 2)` would not use the postgres index, so the sql is split in two.
if tmpChunkTask == nil {
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx, fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -106,7 +106,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx, tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
@@ -140,13 +140,13 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx, &proverTask)
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)

View File

@@ -100,7 +100,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx, publicKey.(string))
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s, proverVersion: %s", publicKey, err, proverName, proverVersion)
}
@@ -108,7 +108,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
}
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check if prover %s is assigned a task, err: %w", publicKey.(string), err)
}

View File

@@ -139,14 +139,14 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
var proverTask *orm.ProverTask
var err error
if proofParameter.UUID != "" {
proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx, proofParameter.UUID, pk)
proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
} else {
// TODO When prover all have upgrade, need delete this logic
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx, proofMsg.Type, proofMsg.ID, pk, pv)
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx.Copy(), proofMsg.Type, proofMsg.ID, pk, pv)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
@@ -159,7 +159,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)
if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
if err = m.validator(ctx.Copy(), proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
return err
}
@@ -175,7 +175,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if verifyErr != nil || !success {
m.verifierFailureTotal.WithLabelValues(pv).Inc()
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
@@ -191,10 +191,10 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)
if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
if err := m.closeProofTask(ctx.Copy(), proverTask, proofMsg, proofTimeSec); err != nil {
m.proofSubmitFailure.Inc()
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
return ErrCoordinatorInternalFailure
}

View File

@@ -19,13 +19,12 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/database/migrate"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/database/migrate"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"

View File

@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, int64(17), cur)
assert.Equal(t, int64(20), cur)
}
func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(17), cur)
assert.Equal(t, int64(20), cur)
}
func testRollback(t *testing.T) {
version, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(17), version)
assert.Equal(t, int64(20), version)
assert.NoError(t, Rollback(pgDB, nil))

View File

@@ -0,0 +1,18 @@
-- +goose Up
-- +goose StatementBegin
create index if not exists idx_prover_task_created_at on prover_task(created_at) where deleted_at IS NULL;
create index if not exists idx_prover_task_task_id on prover_task(task_id) where deleted_at IS NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop index if exists idx_prover_task_created_at;
drop index if exists idx_prover_task_task_id;
-- +goose StatementEnd

View File

@@ -0,0 +1,15 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE l1_block
ADD COLUMN blob_base_fee BIGINT DEFAULT 0;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS l1_block
DROP COLUMN blob_base_fee;
-- +goose StatementEnd

View File

@@ -0,0 +1,17 @@
-- +goose Up
-- +goose StatementBegin
DROP INDEX if exists idx_prover_block_list_on_public_key;
CREATE UNIQUE INDEX if not exists uniq_prover_block_list_on_public_key ON prover_block_list(public_key) where deleted_at IS NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
CREATE INDEX if not exists idx_prover_block_list_on_public_key ON prover_block_list(public_key);
DROP INDEX if exists uniq_prover_block_list_on_public_key;
-- +goose StatementEnd

2
l2geth

Submodule l2geth updated: 38a3a9c919...246955a4df

View File

@@ -9,12 +9,12 @@ import (
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/prover/config"
"scroll-tech/common/cmd"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/prover/config"
)
var (

5134
prover_rust/Cargo.lock generated Normal file

File diff suppressed because it is too large

46
prover_rust/Cargo.toml Normal file
View File

@@ -0,0 +1,46 @@
[package]
name = "prover_rust"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[patch.crates-io]
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
[dependencies]
anyhow = "1.0"
log = "0.4"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.0rc4", default-features = false, features = ["parallel_syn", "scroll"] }
eth-types = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3" }
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
once_cell = "1.19.0"
hex = "0.4.3"
tiny-keccak = { version = "2.0.0", features = ["sha3", "keccak"] }
rand = "0.8.5"
eth-keystore = "0.5.0"
rlp = "0.5.2"
tokio = "1.37.0"
env_logger = "0.11.3"
sled = "0.34.7"
http = "1.1.0"

27
prover_rust/Makefile Normal file
View File

@@ -0,0 +1,27 @@
.PHONY: prover
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')
GIT_REV=$(shell git rev-parse --short HEAD)
ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_GPU_VERSION}
endif
prover:
GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release
rm -rf ./lib && mkdir ./lib
find target/ -name "libzktrie.so" | xargs -I{} cp {} ./lib

22
prover_rust/config.json Normal file
View File

@@ -0,0 +1,22 @@
{
"prover_name": "prover-1",
"hard_fork_name": "homestead",
"keystore_path": "keystore.json",
"keystore_password": "prover-pwd",
"db_path": "unique-db-path-for-prover-1",
"core": {
"params_path": "params",
"assets_path": "assets",
"proof_type": 2
},
"coordinator": {
"base_url": "http://localhost:8555",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 30
},
"l2geth": {
"endpoint": "http://localhost:9999",
"confirmations": "0x1"
}
}

View File

@@ -0,0 +1,21 @@
#!/bin/bash
config_file="$HOME/.cargo/config"
if [ ! -e "$config_file" ]; then
exit 0
fi
if [[ $(head -n 1 "$config_file") == "#"* ]]; then
exit 0
fi
halo2gpu_path=$(grep -Po '(?<=paths = \[")([^"]*)' "$config_file")
pushd "$halo2gpu_path"
commit_hash=$(git log --pretty=format:%h -n 1)
echo "${commit_hash:0:7}"
popd

View File

@@ -0,0 +1 @@
nightly-2023-12-03

9
prover_rust/rustfmt.toml Normal file
View File

@@ -0,0 +1,9 @@
edition = "2021"
comment_width = 100
imports_granularity = "Crate"
max_width = 100
newline_style = "Unix"
# normalize_comments = true
reorder_imports = true
wrap_comments = true

57
prover_rust/src/config.rs Normal file
View File

@@ -0,0 +1,57 @@
use ethers_core::types::BlockNumber;
use serde::{Deserialize, Serialize};
// use serde_json::Error;
use std::{error::Error, fs::File};
use crate::types::ProofType;
#[derive(Debug, Serialize, Deserialize)]
pub struct ProverCoreConfig {
pub params_path: String,
pub assets_path: String,
#[serde(default)]
pub proof_type: ProofType,
#[serde(default)]
pub dump_dir: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CoordinatorConfig {
pub base_url: String,
pub retry_count: u16,
pub retry_wait_time_sec: u32,
pub connection_timeout_sec: u32,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct L2GethConfig {
pub endpoint: String,
pub confirmations: BlockNumber,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
pub prover_name: String,
pub hard_fork_name: String,
pub keystore_path: String,
pub keystore_password: String,
pub db_path: String,
pub core: ProverCoreConfig,
pub coordinator: CoordinatorConfig,
pub l2geth: Option<L2GethConfig>,
}
impl Config {
pub fn from_reader<R>(reader: R) -> Result<Self, Box<dyn Error>>
where
R: std::io::Read,
{
serde_json::from_reader(reader).map_err(|e| Box::new(e) as Box<dyn Error>)
}
pub fn from_file(file_name: String) -> Result<Self, Box<dyn Error>> {
let file = File::open(file_name)?;
Config::from_reader(&file)
}
}
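
For orientation, a minimal sketch of how this loader consumes the config.json shown earlier in this diff (the assertions use that example file's values; the working-directory path is an assumption):

// Sketch: load and inspect the prover configuration, mirroring main()'s startup.
fn load_config_example() -> Result<(), Box<dyn std::error::Error>> {
    let config = Config::from_file("config.json".to_string())?;
    assert_eq!(config.prover_name, "prover-1");
    // l2geth is optional: per prover.rs, only chunk provers need an endpoint.
    if let Some(l2geth) = &config.l2geth {
        println!("l2geth endpoint: {}", l2geth.endpoint);
    }
    Ok(())
}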

View File

@@ -0,0 +1,137 @@
mod api;
mod errors;
pub mod listener;
pub mod types;
use anyhow::{bail, Context, Ok, Result};
use std::rc::Rc;
use api::API;
use errors::*;
use listener::Listener;
use log;
use tokio::runtime::Runtime;
use types::*;
use crate::key_signer::KeySigner;
pub struct Config {
pub endpoint: String,
pub prover_name: String,
pub prover_version: String,
pub hard_fork_name: String,
}
pub struct CoordinatorClient {
api: API,
token: Option<String>,
config: Config,
key_signer: Rc<KeySigner>,
rt: Runtime,
listener: Box<dyn Listener>,
}
impl CoordinatorClient {
pub fn new(
config: Config,
key_signer: Rc<KeySigner>,
listener: Box<dyn Listener>,
) -> Result<Self> {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()?;
let mut client = Self {
api: API::new(&config.endpoint)?,
token: None,
config,
key_signer,
rt,
listener,
};
client.login()?;
Ok(client)
}
fn login(&mut self) -> Result<()> {
let api = &self.api;
let challenge_response = self.rt.block_on(api.challenge())?;
if challenge_response.errcode != Success {
bail!("challenge failed: {}", challenge_response.errmsg)
}
let mut token = match challenge_response.data {
Some(r) => r.token,
None => bail!("challenge failed: got empty token"),
};
let login_message = LoginMessage {
challenge: token.clone(),
prover_name: self.config.prover_name.clone(),
prover_version: self.config.prover_version.clone(),
hard_fork_name: self.config.hard_fork_name.clone(),
};
let buffer = login_message.rlp();
let signature = self.key_signer.sign_buffer(&buffer)?;
let login_request = LoginRequest {
message: login_message,
signature,
};
let login_response = self.rt.block_on(api.login(&login_request, &token))?;
if login_response.errcode != Success {
bail!("login failed: {}", login_response.errmsg)
}
if let Some(r) = login_response.data {
token = r.token;
} else {
bail!("login failed: got empty token")
}
self.token = Some(token);
Ok(())
}
fn action_with_re_login<T, F, R>(&mut self, req: &R, mut f: F) -> Result<Response<T>>
where
F: FnMut(&mut Self, &R) -> Result<Response<T>>,
{
let response = f(self, req)?;
if response.errcode == ErrJWTTokenExpired {
log::info!("JWT expired, attempting to re-login");
self.login().context("JWT expired, re-login failed")?;
log::info!("re-login success");
return f(self, req);
} else if response.errcode != Success {
bail!("action failed: {}", response.errmsg)
}
Ok(response)
}
fn do_get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
self.rt
.block_on(self.api.get_task(req, self.token.as_ref().unwrap()))
}
pub fn get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
self.action_with_re_login(req, |s, req| s.do_get_task(req))
}
fn do_submit_proof(
&mut self,
req: &SubmitProofRequest,
) -> Result<Response<SubmitProofResponseData>> {
let response = self
.rt
.block_on(self.api.submit_proof(req, self.token.as_ref().unwrap()))?;
self.listener.on_proof_submitted(req);
Ok(response)
}
pub fn submit_proof(
&mut self,
req: &SubmitProofRequest,
) -> Result<Response<SubmitProofResponseData>> {
self.action_with_re_login(req, |s, req| s.do_submit_proof(req))
}
}
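
The core of action_with_re_login is a retry-exactly-once policy on JWT expiry. A standalone sketch of that control flow (all names below are hypothetical, not part of this diff):

// Hypothetical minimal model of the retry-once-on-expired-token pattern.
const TOKEN_EXPIRED: i32 = 50001; // mirrors ErrJWTTokenExpired

struct Reply {
    errcode: i32,
}

fn with_relogin(
    mut call: impl FnMut() -> Reply,
    relogin: impl Fn() -> anyhow::Result<()>,
) -> anyhow::Result<Reply> {
    let reply = call();
    if reply.errcode == TOKEN_EXPIRED {
        relogin()?; // refresh the token once...
        return Ok(call()); // ...then replay the original request
    }
    anyhow::ensure!(reply.errcode == 0, "action failed");
    Ok(reply)
}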

View File

@@ -0,0 +1,105 @@
use super::types::*;
use anyhow::{bail, Result};
use reqwest::{header::CONTENT_TYPE, Url};
use serde::Serialize;
pub struct API {
url_base: Url,
pub client: reqwest::Client,
}
impl API {
pub fn new(url_base: &str) -> Result<Self> {
Ok(Self {
url_base: Url::parse(url_base)?,
client: reqwest::Client::new(),
})
}
pub async fn challenge(&self) -> Result<Response<ChallengeResponseData>> {
let method = "/coordinator/v1/challenge";
let url = self.build_url(method)?;
let response = self
.client
.get(url)
.header(CONTENT_TYPE, "application/json")
.send()
.await?;
let response_body = response.text().await?;
serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
}
pub async fn login(
&self,
req: &LoginRequest,
token: &String,
) -> Result<Response<LoginResponseData>> {
let method = "/coordinator/v1/login";
self.post_with_token(method, req, token).await
}
pub async fn get_task(
&self,
req: &GetTaskRequest,
token: &String,
) -> Result<Response<GetTaskResponseData>> {
let method = "/coordinator/v1/get_task";
self.post_with_token(method, req, token).await
}
pub async fn submit_proof(
&self,
req: &SubmitProofRequest,
token: &String,
) -> Result<Response<SubmitProofResponseData>> {
let method = "/coordinator/v1/submit_proof";
self.post_with_token(method, req, token).await
}
async fn post_with_token<Req, Resp>(
&self,
method: &str,
req: &Req,
token: &String,
) -> Result<Resp>
where
Req: ?Sized + Serialize,
Resp: serde::de::DeserializeOwned,
{
let url = self.build_url(method)?;
let request_body = serde_json::to_string(req)?;
log::info!("[coordinator client], {method}, request: {request_body}");
let response = self
.client
.post(url)
.header(CONTENT_TYPE, "application/json")
.bearer_auth(token)
.body(request_body)
.send()
.await?;
if response.status() != http::status::StatusCode::OK {
log::error!(
"[coordinator client], {method}, status not ok: {}",
response.status()
);
bail!(
"[coordinator client], {method}, status not ok: {}",
response.status()
)
}
let response_body = response.text().await?;
log::info!("[coordinator client], {method}, response: {response_body}");
serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
}
fn build_url(&self, method: &str) -> Result<Url> {
self.url_base.join(method).map_err(|e| anyhow::anyhow!(e))
}
}

View File

@@ -0,0 +1,17 @@
// TODO: refactor using enum
pub type ErrorCode = i32;
pub const Success: ErrorCode = 0;
pub const InternalServerError: ErrorCode = 500;
pub const ErrJWTCommonErr: ErrorCode = 50000;
pub const ErrJWTTokenExpired: ErrorCode = 50001;
pub const ErrProverStatsAPIParameterInvalidNo: ErrorCode = 10001;
pub const ErrProverStatsAPIProverTaskFailure: ErrorCode = 10002;
pub const ErrProverStatsAPIProverTotalRewardFailure: ErrorCode = 10003;
pub const ErrCoordinatorParameterInvalidNo: ErrorCode = 20001;
pub const ErrCoordinatorGetTaskFailure: ErrorCode = 20002;
pub const ErrCoordinatorHandleZkProofFailure: ErrorCode = 20003;
pub const ErrCoordinatorEmptyProofData: ErrorCode = 20004;

View File

@@ -0,0 +1,5 @@
use super::SubmitProofRequest;
pub trait Listener {
fn on_proof_submitted(&self, req: &SubmitProofRequest);
}

View File

@@ -0,0 +1,76 @@
use crate::types::{ProofFailureType, ProofStatus};
use rlp::RlpStream;
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize)]
pub struct Response<T> {
pub errcode: i32,
pub errmsg: String,
pub data: Option<T>,
}
#[derive(Serialize, Deserialize)]
pub struct LoginMessage {
pub challenge: String,
pub prover_name: String,
pub prover_version: String,
pub hard_fork_name: String,
}
impl LoginMessage {
pub fn rlp(&self) -> Vec<u8> {
let mut rlp = RlpStream::new();
let num_fields = 4;
rlp.begin_list(num_fields);
rlp.append(&self.prover_name);
rlp.append(&self.prover_version);
rlp.append(&self.challenge);
rlp.append(&self.hard_fork_name);
rlp.out().freeze().into()
}
}
#[derive(Serialize, Deserialize)]
pub struct LoginRequest {
pub message: LoginMessage,
pub signature: String,
}
#[derive(Serialize, Deserialize)]
pub struct LoginResponseData {
pub time: String,
pub token: String,
}
pub type ChallengeResponseData = LoginResponseData;
#[derive(Default, Serialize, Deserialize)]
pub struct GetTaskRequest {
pub task_type: crate::types::ProofType,
pub prover_height: Option<u64>,
pub vks: Vec<String>,
pub vk: String,
}
#[derive(Serialize, Deserialize)]
pub struct GetTaskResponseData {
pub uuid: String,
pub task_id: String,
pub task_type: crate::types::ProofType,
pub task_data: String,
pub hard_fork_name: Option<String>,
}
#[derive(Serialize, Deserialize, Default)]
pub struct SubmitProofRequest {
pub uuid: String,
pub task_id: String,
pub task_type: crate::types::ProofType,
pub status: ProofStatus,
pub proof: String,
pub failure_type: Option<ProofFailureType>,
pub failure_msg: Option<String>,
}
#[derive(Serialize, Deserialize)]
pub struct SubmitProofResponseData {}
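
Every coordinator endpoint wraps its payload in the Response<T> envelope above. A round-trip sketch for the challenge reply (the field values are hypothetical):

// Sketch: decoding a challenge reply into the envelope defined above.
fn decode_challenge_reply() -> serde_json::Result<()> {
    let body = r#"{"errcode":0,"errmsg":"","data":{"time":"2024-05-30T00:00:00Z","token":"<jwt>"}}"#;
    let reply: Response<ChallengeResponseData> = serde_json::from_str(body)?;
    assert_eq!(reply.errcode, 0);
    assert_eq!(reply.data.unwrap().token, "<jwt>");
    Ok(())
}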

View File

@@ -0,0 +1,81 @@
pub mod types;
use crate::types::CommonHash;
use anyhow::Result;
use ethers_core::types::BlockNumber;
use tokio::runtime::Runtime;
use types::{BlockTrace, Header};
use ethers_providers::{Http, Provider};
/// Serialize a type.
///
/// # Panics
///
/// If the type returns an error during serialization.
pub fn serialize<T: serde::Serialize>(t: &T) -> serde_json::Value {
serde_json::to_value(t).expect("Types never fail to serialize.")
}
pub struct GethClient {
id: String,
provider: Provider<Http>,
rt: Runtime,
}
impl GethClient {
pub fn new(id: &str, api_url: &str) -> Result<Self> {
let provider = Provider::<Http>::try_from(api_url)?;
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()?;
Ok(Self {
id: id.to_string(),
provider,
rt,
})
}
pub fn get_block_trace_by_hash(&mut self, hash: &CommonHash) -> Result<BlockTrace> {
log::info!(
"{}: calling get_block_trace_by_hash, hash: {}",
self.id,
hash
);
let trace_future = self
.provider
.request("scroll_getBlockTraceByNumberOrHash", [format!("{hash:#x}")]);
let trace = self.rt.block_on(trace_future)?;
Ok(trace)
}
pub fn header_by_number(&mut self, block_number: &BlockNumber) -> Result<Header> {
log::info!(
"{}: calling header_by_number, hash: {}",
self.id,
block_number
);
let hash = serialize(block_number);
let include_txs = serialize(&false);
let trace_future = self
.provider
.request("eth_getBlockByNumber", [hash, include_txs]);
let trace = self.rt.block_on(trace_future)?;
Ok(trace)
}
pub fn block_number(&mut self) -> Result<BlockNumber> {
log::info!("{}: calling block_number", self.id);
let trace_future = self.provider.request("eth_blockNumber", ());
let trace = self.rt.block_on(trace_future)?;
Ok(trace)
}
}
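
A usage sketch for GethClient (the endpoint is hypothetical). The client owns a single-threaded tokio runtime, so its API is synchronous from the caller's perspective:

fn geth_client_example() -> anyhow::Result<()> {
    // "prover-1" is only a label used to prefix this client's log lines.
    let mut client = GethClient::new("prover-1", "http://localhost:9999")?;
    let number = client.block_number()?; // eth_blockNumber, blocks the calling thread
    println!("latest block number: {number}");
    Ok(())
}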

View File

@@ -0,0 +1,40 @@
use eth_types::{H256, U64};
use serde::{Deserialize, Serialize};
use crate::types::CommonHash;
use prover::BlockTrace as ProverBlockTrace;
/// l2 block full trace
#[derive(Deserialize, Serialize, Default, Debug, Clone)]
pub struct BlockTrace {
#[serde(flatten)]
pub block_trace: ProverBlockTrace,
pub version: String,
pub withdraw_trie_root: Option<CommonHash>,
#[serde(rename = "mptwitness", default)]
pub mpt_witness: Vec<u8>,
}
pub fn get_block_number(block_trace: &ProverBlockTrace) -> Option<u64> {
block_trace.header.number.map(|n| n.as_u64())
}
pub type TxHash = H256;
/// This struct tracks https://github.com/scroll-tech/go-ethereum/blob/0f0cd99f7a2e/core/types/block.go#Header.
/// Its fields are not 100% identical to eth_types::Block, so it may need to change at
/// some point; currently only the `number` field is required.
#[derive(Debug, Deserialize, Serialize, Default)]
pub struct Header {
#[serde(flatten)]
block: eth_types::Block<TxHash>,
}
impl Header {
pub fn get_number(&self) -> Option<U64> {
self.block.number
}
}

View File

@@ -0,0 +1,105 @@
use std::path::Path;
use anyhow::Result;
use ethers_core::{
k256::{
ecdsa::{signature::hazmat::PrehashSigner, RecoveryId, Signature, SigningKey},
elliptic_curve::{sec1::ToEncodedPoint, FieldBytes},
PublicKey, Secp256k1, SecretKey,
},
types::Signature as EthSignature,
};
use eth_types::{H256, U256};
use hex::ToHex;
use tiny_keccak::{Hasher, Keccak};
pub struct KeySigner {
public_key: PublicKey,
signer: SigningKey,
}
impl KeySigner {
pub fn new(key_path: &str, passwd: &str) -> Result<Self> {
let p = Path::new(key_path);
let secret = if !p.exists() {
let dir = p.parent().unwrap();
let name = p.file_name().and_then(|s| s.to_str());
let mut rng = rand::thread_rng();
let (secret, _) = eth_keystore::new(dir, &mut rng, passwd, name)?;
secret
} else {
eth_keystore::decrypt_key(key_path, passwd).map_err(|e| anyhow::anyhow!(e))?
};
let secret_key = SecretKey::from_bytes(secret.as_slice().into())?;
let signer = SigningKey::from(secret_key.clone());
Ok(Self {
public_key: secret_key.public_key(),
signer,
})
}
pub fn get_public_key(&self) -> String {
let v: Vec<u8> = Vec::from(self.public_key.to_encoded_point(true).as_bytes());
buffer_to_hex(&v, false)
}
/// Signs the provided hash.
pub fn sign_hash(&self, hash: H256) -> Result<EthSignature> {
let signer = &self.signer as &dyn PrehashSigner<(Signature, RecoveryId)>;
let (recoverable_sig, recovery_id) = signer.sign_prehash(hash.as_ref())?;
let v = u8::from(recovery_id) as u64;
let r_bytes: FieldBytes<Secp256k1> = recoverable_sig.r().into();
let s_bytes: FieldBytes<Secp256k1> = recoverable_sig.s().into();
let r = U256::from_big_endian(r_bytes.as_slice());
let s = U256::from_big_endian(s_bytes.as_slice());
Ok(EthSignature { r, s, v })
}
pub fn sign_buffer<T>(&self, buffer: &T) -> Result<String>
where
T: AsRef<[u8]>,
{
let pre_hash = keccak256(buffer);
let hash_str = buffer_to_hex(&pre_hash, true);
println!("hash is {hash_str}");
let hash = H256::from(pre_hash);
let sig = self.sign_hash(hash)?;
Ok(buffer_to_hex(&sig.to_vec(), true))
}
}
fn buffer_to_hex<T>(buffer: &T, has_prefix: bool) -> String
where
T: AsRef<[u8]>,
{
if has_prefix {
format!("0x{}", buffer.encode_hex::<String>())
} else {
buffer.encode_hex::<String>()
}
}
/// Compute the Keccak-256 hash of input bytes.
///
/// Note that strings are interpreted as UTF-8 bytes,
// TODO: Add Solidity Keccak256 packing support
pub fn keccak256<T: AsRef<[u8]>>(bytes: T) -> [u8; 32] {
let mut output = [0u8; 32];
let mut hasher = Keccak::v256();
hasher.update(bytes.as_ref());
hasher.finalize(&mut output);
output
}
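
A usage sketch for KeySigner, reusing the keystore path and password from the example config.json. get_public_key returns the SEC1-compressed key as hex without a 0x prefix (this is how the coordinator identifies the prover), while sign_buffer returns the 65-byte r||s||v signature hex-encoded with a 0x prefix:

fn key_signer_example() -> anyhow::Result<()> {
    // Creates keystore.json on first run, decrypts it on subsequent runs.
    let signer = KeySigner::new("keystore.json", "prover-pwd")?;
    println!("prover public key: {}", signer.get_public_key());
    let signature = signer.sign_buffer(b"login payload")?; // keccak256, then ECDSA
    assert!(signature.starts_with("0x"));
    Ok(())
}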

142
prover_rust/src/main.rs Normal file
View File

@@ -0,0 +1,142 @@
mod config;
mod coordinator_client;
mod geth_client;
mod key_signer;
mod prover;
mod task_cache;
mod types;
mod utils_log;
mod version;
mod zk_circuits_handler;
use anyhow::{Context, Result};
use config::Config;
use coordinator_client::listener::Listener;
use log;
use prover::Prover;
use core::time;
use std::rc::Rc;
use task_cache::TaskCache;
use types::TaskWrapper;
struct ClearCacheCoordinatorListener {
pub task_cache: Rc<TaskCache>,
}
impl Listener for ClearCacheCoordinatorListener {
fn on_proof_submitted(&self, req: &coordinator_client::types::SubmitProofRequest) {
let result = self.task_cache.delete_task(req.task_id.clone());
if let Err(e) = result {
log::error!("delete task from embed db failed, {}", e.to_string());
} else {
log::info!(
"delete task from embed db successfully, task_id: {}",
&req.task_id
);
}
}
}
struct TaskProcessor<'a> {
prover: &'a Prover<'a>,
task_cache: Rc<TaskCache>,
}
impl<'a> TaskProcessor<'a> {
pub fn new(prover: &'a Prover, task_cache: Rc<TaskCache>) -> Self {
TaskProcessor { prover, task_cache }
}
pub fn start(&self) {
loop {
log::info!("start a new round.");
if let Err(err) = self.prove_and_submit() {
log::error!("encounter error: {err}");
} else {
log::info!("prove & submit succeed.");
}
}
}
fn prove_and_submit(&self) -> Result<()> {
let task_from_cache = self
.task_cache
.get_last_task()
.context("failed to peek from stack")?;
let mut task_wrapper = match task_from_cache {
Some(t) => t,
None => {
let fetch_result = self.prover.fetch_task();
if let Err(err) = fetch_result {
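// back off before surfacing the error so a failing coordinator is not polled in a tight loop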
std::thread::sleep(time::Duration::from_secs(10));
return Err(err).context("failed to fetch task from coordinator");
}
let task_wrapper: TaskWrapper = fetch_result.unwrap().into();
self.task_cache
.put_task(&task_wrapper)
.context("failed to push task into stack")?;
task_wrapper
}
};
if task_wrapper.get_count() <= 2 {
task_wrapper.increment_count();
self.task_cache
.put_task(&task_wrapper)
.context("failed to push task into stack, updating count")?;
log::info!(
"start to prove task, task_type: {:?}, task_id: {}",
task_wrapper.task.task_type,
task_wrapper.task.id
);
let result = match self.prover.prove_task(&task_wrapper.task) {
Ok(proof_detail) => self
.prover
.submit_proof(&proof_detail, task_wrapper.task.uuid.clone()),
Err(error) => self.prover.submit_error(
&task_wrapper.task,
types::ProofFailureType::NoPanic,
error,
),
};
return result;
}
// if the task has already been tried >= 3 times, earlier attempts most likely panicked during proving (the count persists across restarts via the task cache)
log::error!(
"zk proving panic for task, task_type: {:?}, task_id: {}",
task_wrapper.task.task_type,
task_wrapper.task.id
);
self.prover.submit_error(
&task_wrapper.task,
types::ProofFailureType::Panic,
anyhow::anyhow!("zk proving panic for task"),
)
}
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
utils_log::log_init();
let file_name = "config.json";
let config: Config = Config::from_file(file_name.to_string())?;
println!("{:?}", config);
let task_cache = Rc::new(TaskCache::new(&config.db_path)?);
let coordinator_listener = Box::new(ClearCacheCoordinatorListener {
task_cache: task_cache.clone(),
});
let prover = Prover::new(&config, coordinator_listener)?;
let task_processor = TaskProcessor::new(&prover, task_cache);
task_processor.start();
Ok(())
}

324
prover_rust/src/prover.rs Normal file
View File

@@ -0,0 +1,324 @@
use anyhow::{bail, Error, Ok, Result};
use eth_types::U64;
use once_cell::sync::Lazy;
use std::{cell::RefCell, cmp::Ordering, env, rc::Rc};
use crate::{
config::Config,
coordinator_client::{
listener::Listener, types::*, Config as CoordinatorConfig, CoordinatorClient,
},
geth_client::{types::get_block_number, GethClient},
key_signer::KeySigner,
types::{CommonHash, ProofFailureType, ProofStatus, ProofType},
zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider},
};
use super::types::{ProofDetail, Task};
use prover::{BlockTrace, ChunkHash, ChunkProof};
// Only used for debugging.
pub(crate) static OUTPUT_DIR: Lazy<Option<String>> =
Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
pub struct Prover<'a> {
config: &'a Config,
key_signer: Rc<KeySigner>,
circuits_handler_provider: CircuitsHandlerProvider,
coordinator_client: RefCell<CoordinatorClient>,
geth_client: Option<RefCell<GethClient>>,
}
// a U64 is positive when its most significant (63rd) bit is not set
fn is_positive(n: &U64) -> bool {
!n.bit(63)
}
impl<'a> Prover<'a> {
pub fn new(config: &'a Config, coordinator_listener: Box<dyn Listener>) -> Result<Self> {
let proof_type = config.core.proof_type;
let params_path = &config.core.params_path;
let assets_path = &config.core.assets_path;
let keystore_path = &config.keystore_path;
let keystore_password = &config.keystore_password;
let coordinator_config = CoordinatorConfig {
endpoint: config.coordinator.base_url.clone(),
prover_name: config.prover_name.clone(),
prover_version: crate::version::get_version(),
hard_fork_name: config.hard_fork_name.clone(),
};
let key_signer = Rc::new(KeySigner::new(&keystore_path, &keystore_password)?);
let coordinator_client = CoordinatorClient::new(
coordinator_config,
Rc::clone(&key_signer),
coordinator_listener,
)?;
let mut prover = Prover {
config,
key_signer: Rc::clone(&key_signer),
circuits_handler_provider: CircuitsHandlerProvider::new(
proof_type,
params_path,
assets_path,
)?,
coordinator_client: RefCell::new(coordinator_client),
geth_client: None,
};
if config.core.proof_type == ProofType::ProofTypeChunk {
prover.geth_client = Some(RefCell::new(GethClient::new(
"test",
&config.l2geth.as_ref().unwrap().endpoint,
)?));
}
Ok(prover)
}
pub fn get_proof_type(&self) -> ProofType {
self.config.core.proof_type
}
pub fn get_public_key(&self) -> String {
self.key_signer.get_public_key()
}
pub fn fetch_task(&self) -> Result<Task> {
let vks = self.circuits_handler_provider.get_vks();
let vk = vks[0].clone();
let mut req = GetTaskRequest {
task_type: self.get_proof_type(),
prover_height: None,
vks,
vk,
};
if self.get_proof_type() == ProofType::ProofTypeChunk {
let latest_block_number = self.get_latest_block_number_value()?;
if let Some(v) = latest_block_number {
if v.as_u64() == 0 {
bail!("omit to prove task of the genesis block")
}
req.prover_height = Some(v.as_u64());
} else {
bail!("failed to fetch latest confirmed block number, got None")
}
}
let resp = self.coordinator_client.borrow_mut().get_task(&req)?;
Task::try_from(&resp.data.unwrap()).map_err(|e| anyhow::anyhow!(e))
}
pub fn prove_task(&self, task: &Task) -> Result<ProofDetail> {
let version = task.get_version();
if let Some(handler) = self.circuits_handler_provider.get_circuits_client(version) {
self.do_prove(task, handler)
} else {
bail!("failed to get a circuit handler")
}
}
fn do_prove(&self, task: &Task, handler: &Box<dyn CircuitsHandler>) -> Result<ProofDetail> {
let mut proof_detail = ProofDetail {
id: task.id.clone(),
proof_type: task.task_type,
..Default::default()
};
match task.task_type {
ProofType::ProofTypeBatch => {
let chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)> =
self.gen_chunk_hashes_proofs(task)?;
let chunk_proofs: Vec<ChunkProof> =
chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();
let is_valid = handler.aggregator_check_chunk_proofs(&chunk_proofs)?;
if !is_valid {
bail!("non-match chunk protocol, task-id: {}", &task.id)
}
let batch_proof = handler.aggregator_gen_agg_evm_proof(
chunk_hashes_proofs,
None,
self.get_output_dir(),
)?;
proof_detail.batch_proof = Some(batch_proof);
Ok(proof_detail)
}
ProofType::ProofTypeChunk => {
let chunk_trace = self.gen_chunk_traces(task)?;
let chunk_proof = handler.prover_gen_chunk_proof(
chunk_trace,
None,
None,
self.get_output_dir(),
)?;
proof_detail.chunk_proof = Some(chunk_proof);
Ok(proof_detail)
}
_ => bail!("task type invalid"),
}
}
pub fn submit_proof(&self, proof_detail: &ProofDetail, uuid: String) -> Result<()> {
let proof_data = match proof_detail.proof_type {
ProofType::ProofTypeBatch => {
serde_json::to_string(proof_detail.batch_proof.as_ref().unwrap())?
}
ProofType::ProofTypeChunk => {
serde_json::to_string(proof_detail.chunk_proof.as_ref().unwrap())?
}
_ => unreachable!(),
};
let request = SubmitProofRequest {
uuid,
task_id: proof_detail.id.clone(),
task_type: proof_detail.proof_type,
status: ProofStatus::Ok,
proof: proof_data,
..Default::default()
};
self.do_submit(&request)
}
pub fn submit_error(
&self,
task: &Task,
failure_type: ProofFailureType,
error: Error,
) -> Result<()> {
let request = SubmitProofRequest {
uuid: task.uuid.clone(),
task_id: task.id.clone(),
task_type: task.task_type,
status: ProofStatus::Error,
failure_type: Some(failure_type),
failure_msg: Some(error.to_string()),
..Default::default()
};
self.do_submit(&request)
}
fn do_submit(&self, request: &SubmitProofRequest) -> Result<()> {
self.coordinator_client.borrow_mut().submit_proof(request)?;
Ok(())
}
fn get_latest_block_number_value(&self) -> Result<Option<U64>> {
let number = self
.geth_client
.as_ref()
.unwrap()
.borrow_mut()
.block_number()?;
Ok(number.as_number())
}
// fn get_configured_block_number_value(&self) -> Result<Option<U64>> {
// self.get_block_number_value(&self.config.l2geth.as_ref().unwrap().confirmations)
// }
// fn get_block_number_value(&self, block_number: &BlockNumber) -> Result<Option<U64>> {
// match block_number {
// BlockNumber::Safe | BlockNumber::Finalized => {
// let header =
// self.geth_client.as_ref().unwrap().borrow_mut().header_by_number(block_number)?;
// Ok(header.get_number())
// },
// BlockNumber::Latest => {
// let number = self.geth_client.as_ref().unwrap().borrow_mut().block_number()?;
// Ok(number.as_number())
// },
// BlockNumber::Number(n) if is_positive(n) => {
// let number = self.geth_client.as_ref().unwrap().borrow_mut().block_number()?;
// let diff = number.as_number()
// .filter(|m| m.as_u64() >= n.as_u64())
// .map(|m| U64::from(m.as_u64() - n.as_u64()));
// Ok(diff)
// },
// _ => bail!("unknown confirmation type"),
// }
// }
fn get_output_dir(&self) -> Option<&str> {
OUTPUT_DIR.as_deref()
}
fn gen_chunk_traces(&self, task: &Task) -> Result<Vec<BlockTrace>> {
if let Some(chunk_detail) = task.chunk_task_detail.as_ref() {
self.get_sorted_traces_by_hashes(&chunk_detail.block_hashes)
} else {
bail!("invalid task")
}
}
fn gen_chunk_hashes_proofs(&self, task: &Task) -> Result<Vec<(ChunkHash, ChunkProof)>> {
if let Some(batch_detail) = task.batch_task_detail.as_ref() {
Ok(batch_detail
.chunk_infos
.clone()
.into_iter()
.zip(batch_detail.chunk_proofs.clone())
.collect())
} else {
bail!("invalid task")
}
}
fn get_sorted_traces_by_hashes(
&self,
block_hashes: &Vec<CommonHash>,
) -> Result<Vec<BlockTrace>> {
if block_hashes.is_empty() {
bail!("blockHashes is empty")
}
let mut block_traces = Vec::new();
for hash in block_hashes {
let trace = self
.geth_client
.as_ref()
.unwrap()
.borrow_mut()
.get_block_trace_by_hash(hash)?;
block_traces.push(trace.block_trace);
}
block_traces.sort_by(|a, b| {
match (get_block_number(a), get_block_number(b)) {
(None, _) => Ordering::Less,
(_, None) => Ordering::Greater,
(Some(x), Some(y)) => x.cmp(&y),
}
});
let block_numbers: Vec<u64> = block_traces
.iter()
.map(|trace| get_block_number(trace).unwrap_or(0))
.collect();
for w in block_numbers.windows(2) {
if w[0] + 1 != w[1] {
bail!(
"block numbers are not continuous, got {} and {}",
w[0],
w[1]
)
}
}
Ok(block_traces)
}
}

View File

@@ -0,0 +1,40 @@
use anyhow::{Ok, Result};
use crate::types::TaskWrapper;
use sled::{Config, Db};
pub struct TaskCache {
db: Db,
}
impl TaskCache {
pub fn new(db_path: &str) -> Result<Self> {
let config = Config::new().path(db_path);
let db = config.open()?;
Ok(Self { db })
}
pub fn put_task(&self, task_wrapper: &TaskWrapper) -> Result<()> {
let k = task_wrapper.task.id.clone().into_bytes();
let v = serde_json::to_vec(task_wrapper)?;
self.db.insert(k, v)?;
Ok(())
}
pub fn get_last_task(&self) -> Result<Option<TaskWrapper>> {
let last = self.db.last()?;
if let Some((k, v)) = last {
let kk = std::str::from_utf8(k.as_ref())?;
log::info!("get last task, task_id: {kk}");
let task_wrapper: TaskWrapper = serde_json::from_slice(v.as_ref())?;
return Ok(Some(task_wrapper));
}
Ok(None)
}
pub fn delete_task(&self, task_id: String) -> Result<()> {
let k = task_id.into_bytes();
self.db.remove(k)?;
Ok(())
}
}
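
A round-trip sketch for TaskCache (the path is hypothetical). One subtlety: sled orders keys as raw bytes, so get_last_task returns the cached task whose id sorts highest, not necessarily the most recently inserted one:

fn task_cache_example() -> anyhow::Result<()> {
    let cache = TaskCache::new("/tmp/prover-task-cache-demo")?;
    let task = crate::types::Task {
        id: "task-001".to_string(),
        ..Default::default()
    };
    let mut wrapper: crate::types::TaskWrapper = task.into();
    wrapper.increment_count();
    cache.put_task(&wrapper)?; // upsert, keyed by task id
    assert!(cache.get_last_task()?.is_some()); // peek without removing
    cache.delete_task("task-001".to_string())?; // drop once the proof is submitted
    Ok(())
}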

237
prover_rust/src/types.rs Normal file
View File

@@ -0,0 +1,237 @@
use core::fmt;
use eth_types::H256;
use prover::{BatchProof, ChunkHash, ChunkProof};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::coordinator_client::types::GetTaskResponseData;
pub type CommonHash = H256;
pub type Bytes = Vec<u8>;
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofType {
ProofTypeUndefined,
ProofTypeChunk,
ProofTypeBatch,
}
impl ProofType {
fn from_u8(v: u8) -> Self {
match v {
1 => ProofType::ProofTypeChunk,
2 => ProofType::ProofTypeBatch,
_ => ProofType::ProofTypeUndefined,
}
}
}
impl Serialize for ProofType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProofType::ProofTypeUndefined => serializer.serialize_i8(0),
ProofType::ProofTypeChunk => serializer.serialize_i8(1),
ProofType::ProofTypeBatch => serializer.serialize_i8(2),
}
}
}
impl<'de> Deserialize<'de> for ProofType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProofType::from_u8(v))
}
}
impl Default for ProofType {
fn default() -> Self {
Self::ProofTypeUndefined
}
}
#[derive(Serialize, Deserialize)]
pub struct BatchTaskDetail {
pub chunk_infos: Vec<ChunkHash>,
pub chunk_proofs: Vec<ChunkProof>,
}
#[derive(Serialize, Deserialize)]
pub struct ChunkTaskDetail {
pub block_hashes: Vec<CommonHash>,
}
#[derive(Serialize, Deserialize, Default)]
pub struct Task {
pub uuid: String,
pub id: String,
#[serde(rename = "type", default)]
pub task_type: ProofType,
#[serde(default)]
pub batch_task_detail: Option<BatchTaskDetail>,
#[serde(default)]
pub chunk_task_detail: Option<ChunkTaskDetail>,
#[serde(default)]
pub hard_fork_name: Option<String>,
}
impl Task {
pub fn get_version(&self) -> String {
match self.hard_fork_name.as_ref() {
Some(v) => v.clone(),
None => "".to_string(),
}
}
}
impl TryFrom<&GetTaskResponseData> for Task {
type Error = serde_json::Error;
fn try_from(value: &GetTaskResponseData) -> Result<Self, Self::Error> {
let mut task = Task {
uuid: value.uuid.clone(),
id: value.task_id.clone(),
task_type: value.task_type,
chunk_task_detail: None,
batch_task_detail: None,
hard_fork_name: value.hard_fork_name.clone(),
};
match task.task_type {
ProofType::ProofTypeBatch => {
task.batch_task_detail = Some(serde_json::from_str(&value.task_data)?);
}
ProofType::ProofTypeChunk => {
task.chunk_task_detail = Some(serde_json::from_str(&value.task_data)?);
}
_ => unreachable!(),
}
Ok(task)
}
}
#[derive(Serialize, Deserialize, Default)]
pub struct TaskWrapper {
pub task: Task,
count: usize,
}
impl TaskWrapper {
pub fn increment_count(&mut self) {
self.count += 1;
}
pub fn get_count(&self) -> usize {
self.count
}
}
impl From<Task> for TaskWrapper {
fn from(task: Task) -> Self {
TaskWrapper { task, count: 0 }
}
}
#[derive(Serialize, Deserialize, Default)]
pub struct ProofDetail {
pub id: String,
#[serde(rename = "type", default)]
pub proof_type: ProofType,
pub chunk_proof: Option<ChunkProof>,
pub batch_proof: Option<BatchProof>,
pub error: String,
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofFailureType {
Undefined,
Panic,
NoPanic,
}
impl ProofFailureType {
fn from_u8(v: u8) -> Self {
match v {
1 => ProofFailureType::Panic,
2 => ProofFailureType::NoPanic,
_ => ProofFailureType::Undefined,
}
}
}
impl Serialize for ProofFailureType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProofFailureType::Undefined => serializer.serialize_u8(0),
ProofFailureType::Panic => serializer.serialize_u8(1),
ProofFailureType::NoPanic => serializer.serialize_u8(2),
}
}
}
impl<'de> Deserialize<'de> for ProofFailureType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProofFailureType::from_u8(v))
}
}
impl Default for ProofFailureType {
fn default() -> Self {
Self::Undefined
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofStatus {
Ok,
Error,
}
impl ProofStatus {
fn from_u8(v: u8) -> Self {
match v {
0 => ProofStatus::Ok,
_ => ProofStatus::Error,
}
}
}
impl Serialize for ProofStatus {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match *self {
ProofStatus::Ok => serializer.serialize_u8(0),
ProofStatus::Error => serializer.serialize_u8(1),
}
}
}
impl<'de> Deserialize<'de> for ProofStatus {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let v: u8 = u8::deserialize(deserializer)?;
Ok(ProofStatus::from_u8(v))
}
}
impl Default for ProofStatus {
fn default() -> Self {
Self::Ok
}
}
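
The hand-written Serialize/Deserialize impls above put these enums on the wire as bare integers rather than variant names, matching the coordinator's JSON format. A quick round trip:

fn enum_wire_format_example() -> serde_json::Result<()> {
    // ProofType serializes to its numeric discriminant...
    assert_eq!(serde_json::to_string(&ProofType::ProofTypeBatch)?, "2");
    // ...and deserializes back from one.
    let t: ProofType = serde_json::from_str("1")?;
    assert_eq!(t, ProofType::ProofTypeChunk);
    Ok(())
}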

View File

@@ -0,0 +1,11 @@
use env_logger::Env;
use std::sync::Once;
static LOG_INIT: Once = Once::new();
/// Initialize log
pub fn log_init() {
LOG_INIT.call_once(|| {
env_logger::Builder::from_env(Env::default().default_filter_or("info")).init();
});
}

View File

@@ -0,0 +1,17 @@
use std::cell::OnceCell;
static DEFAULT_COMMIT: &str = "unknown";
static mut VERSION: OnceCell<String> = OnceCell::new();
pub const TAG: &str = "v4.4.3";
pub const DEFAULT_ZK_VERSION: &str = "000000-000000";
fn init_version() -> String {
let commit = option_env!("GIT_REV").unwrap_or(DEFAULT_COMMIT);
let zk_version = option_env!("ZK_VERSION").unwrap_or(DEFAULT_ZK_VERSION);
format!("{TAG}-{commit}-{zk_version}")
}
pub fn get_version() -> String {
unsafe { VERSION.get_or_init(init_version).clone() }
}
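
GIT_REV and ZK_VERSION are injected at build time by the Makefile via option_env!, so the full version string looks like this (the commit hashes below are hypothetical):

// Hypothetical: built with GIT_REV=1a2b3c4 and ZK_VERSION=e5f6a7b-c8d9e0f.
assert_eq!(get_version(), "v4.4.3-1a2b3c4-e5f6a7b-c8d9e0f");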

View File

@@ -0,0 +1,89 @@
mod base;
// mod next;
mod types;
use anyhow::Result;
use base::BaseCircuitsHandler;
use std::collections::HashMap;
use types::{BatchProof, BlockTrace, ChunkHash, ChunkProof};
use crate::types::ProofType;
// use self::next::NextCircuitsHandler;
type CircuitsVersion = String;
pub mod utils {
pub fn encode_vk(vk: Vec<u8>) -> String {
base64::encode(vk)
}
}
pub trait CircuitsHandler {
// api of zkevm::Prover
fn prover_get_vk(&self) -> Option<Vec<u8>>;
fn prover_gen_chunk_proof(
&self,
chunk_trace: Vec<BlockTrace>,
name: Option<&str>,
inner_id: Option<&str>,
output_dir: Option<&str>,
) -> Result<ChunkProof>;
// api of aggregator::Prover
fn aggregator_get_vk(&self) -> Option<Vec<u8>>;
fn aggregator_gen_agg_evm_proof(
&self,
chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)>,
name: Option<&str>,
output_dir: Option<&str>,
) -> Result<BatchProof>;
fn aggregator_check_chunk_proofs(&self, chunk_proofs: &[ChunkProof]) -> Result<bool>;
}
pub struct CircuitsHandlerProvider {
proof_type: ProofType,
circuits_handler_map: HashMap<CircuitsVersion, Box<dyn CircuitsHandler>>,
}
impl CircuitsHandlerProvider {
pub fn new(proof_type: ProofType, params_dir: &str, assets_dir: &str) -> Result<Self> {
let mut m: HashMap<CircuitsVersion, Box<dyn CircuitsHandler>> = HashMap::new();
let handler = BaseCircuitsHandler::new(proof_type, params_dir, assets_dir)?;
m.insert("".to_string(), Box::new(handler));
// let next_handler = NextCircuitsHandler::new(proof_type, params_dir, assets_dir)?;
// m.insert("".to_string(), Box::new(next_handler));
Ok(CircuitsHandlerProvider {
proof_type,
circuits_handler_map: m,
})
}
pub fn get_circuits_client(&self, version: String) -> Option<&dyn CircuitsHandler> {
self.circuits_handler_map.get(&version).map(|handler| handler.as_ref())
}
pub fn get_vks(&self) -> Vec<String> {
self.circuits_handler_map
.values()
.map(|handler| {
let vk = match self.proof_type {
ProofType::ProofTypeBatch => handler.aggregator_get_vk(),
ProofType::ProofTypeChunk => handler.prover_get_vk(),
_ => unreachable!(),
};
vk.map_or_else(String::new, utils::encode_vk)
})
.collect()
}
}
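// Hedged usage sketch (paths and logging are illustrative, not from this crate):
// the provider is built once for the prover's proof type, advertises every
// handler's vk to the coordinator, then dispatches tasks by version key.
#[allow(dead_code)]
fn sketch_usage() -> Result<()> {
let provider = CircuitsHandlerProvider::new(ProofType::ProofTypeChunk, "./params", "./assets")?;
let vks = provider.get_vks();
log::info!("supported vks: {vks:?}");
// "" selects the base handler; a hard-fork name would select a newer one.
if let Some(handler) = provider.get_circuits_client("".to_string()) {
log::info!("handler has a chunk vk: {}", handler.prover_get_vk().is_some());
}
Ok(())
}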

View File

@@ -0,0 +1,91 @@
use super::{
types::{BatchProof, BlockTrace, ChunkHash, ChunkProof},
CircuitsHandler,
};
use crate::types::ProofType;
use anyhow::{bail, Ok, Result};
use prover::{aggregator::Prover as BatchProver, zkevm::Prover as ChunkProver};
use std::cell::RefCell;
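// Only one of the two provers is populated, chosen by `proof_type` in `new`;
// RefCell gives the `&self` trait methods interior mutability over that prover.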
#[derive(Default)]
pub struct BaseCircuitsHandler {
chunk_prover: Option<RefCell<ChunkProver>>,
batch_prover: Option<RefCell<BatchProver>>,
}
impl BaseCircuitsHandler {
pub fn new(proof_type: ProofType, params_dir: &str, assets_dir: &str) -> Result<Self> {
match proof_type {
ProofType::ProofTypeChunk => Ok(Self {
chunk_prover: Some(RefCell::new(ChunkProver::from_dirs(params_dir, assets_dir))),
..Default::default()
}),
ProofType::ProofTypeBatch => Ok(Self {
batch_prover: Some(RefCell::new(BatchProver::from_dirs(params_dir, assets_dir))),
..Default::default()
}),
_ => bail!("proof type invalid"),
}
}
}
impl CircuitsHandler for BaseCircuitsHandler {
// api of zkevm::Prover
fn prover_get_vk(&self) -> Option<Vec<u8>> {
log::info!("[circuit handler], [base], [chunk] get_vk");
self.chunk_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk())
}
fn prover_gen_chunk_proof(
&self,
chunk_trace: Vec<BlockTrace>,
name: Option<&str>,
inner_id: Option<&str>,
output_dir: Option<&str>,
) -> Result<ChunkProof> {
log::info!("[circuit handler], [base], [chunk] gen_chunk_proof");
if let Some(prover) = self.chunk_prover.as_ref() {
return prover
.borrow_mut()
.gen_chunk_proof(chunk_trace, name, inner_id, output_dir);
}
unreachable!("please check errors in proof_type logic")
}
// api of aggregator::Prover
fn aggregator_get_vk(&self) -> Option<Vec<u8>> {
log::info!("[circuit handler], [base], [batch] get_vk");
self.batch_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk())
}
fn aggregator_gen_agg_evm_proof(
&self,
chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)>,
name: Option<&str>,
output_dir: Option<&str>,
) -> Result<BatchProof> {
log::info!("[circuit handler], [base], [batch] gen_agg_evm_proof");
if let Some(prover) = self.batch_prover.as_ref() {
return prover
.borrow_mut()
.gen_agg_evm_proof(chunk_hashes_proofs, name, output_dir);
}
unreachable!("please check errors in proof_type logic")
}
fn aggregator_check_chunk_proofs(&self, chunk_proofs: &[ChunkProof]) -> Result<bool> {
log::info!("[circuit handler], [base], [batch] check_chunk_proofs");
if let Some(prover) = self.batch_prover.as_ref() {
return Ok(prover.borrow_mut().check_chunk_proofs(chunk_proofs));
}
unreachable!("please check errors in proof_type logic")
}
}

View File

@@ -0,0 +1,120 @@
use super::{types::*, CircuitsHandler};
use crate::types::ProofType;
use anyhow::{bail, Ok, Result};
use prover_next::{aggregator::Prover as NextBatchProver, zkevm::Prover as NextChunkProver};
use std::cell::RefCell;
#[derive(Default)]
pub struct NextCircuitsHandler {
chunk_prover: Option<RefCell<NextChunkProver>>,
batch_prover: Option<RefCell<NextBatchProver>>,
}
impl NextCircuitsHandler {
pub fn new(proof_type: ProofType, params_dir: &str, assets_dir: &str) -> Result<Self> {
match proof_type {
ProofType::ProofTypeChunk => Ok(Self {
chunk_prover: Some(RefCell::new(NextChunkProver::from_dirs(
params_dir, assets_dir,
))),
..Default::default()
}),
ProofType::ProofTypeBatch => Ok(Self {
batch_prover: Some(RefCell::new(NextBatchProver::from_dirs(
params_dir, assets_dir,
))),
..Default::default()
}),
_ => bail!("proof type invalid"),
}
}
}
impl CircuitsHandler for NextCircuitsHandler {
// api of zkevm::Prover
fn prover_get_vk(&self) -> Option<Vec<u8>> {
log::info!("[circuit handler], [next], [chunk] get_vk");
self.chunk_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk())
}
fn prover_gen_chunk_proof(
&self,
chunk_trace: Vec<BlockTrace>,
name: Option<&str>,
inner_id: Option<&str>,
output_dir: Option<&str>,
) -> Result<ChunkProof> {
log::info!("[circuit handler], [next], [chunk] gen_chunk_proof");
if let Some(prover) = self.chunk_prover.as_ref() {
let next_chunk_trace = chunk_trace
.into_iter()
.map(block_trace_base_to_next)
.collect::<Result<Vec<NextBlockTrace>>>()?;
let next_chunk_proof = prover.borrow_mut().gen_chunk_proof(
next_chunk_trace,
name,
inner_id,
output_dir,
)?;
return chunk_proof_next_to_base(next_chunk_proof);
}
unreachable!("please check errors in proof_type logic")
}
// api of aggregator::Prover
fn aggregator_get_vk(&self) -> Option<Vec<u8>> {
log::info!("[circuit handler], [next], [batch] get_vk");
self.batch_prover
.as_ref()
.and_then(|prover| prover.borrow().get_vk())
}
fn aggregator_gen_agg_evm_proof(
&self,
chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)>,
name: Option<&str>,
output_dir: Option<&str>,
) -> Result<BatchProof> {
log::info!("[circuit handler], [next], [batch] gen_agg_evm_proof");
if let Some(prover) = self.batch_prover.as_ref() {
let next_chunk_hashes_proofs = chunk_hashes_proofs
.into_iter()
.map(|(hash, proof)| {
let next_hash = chunk_hash_base_to_next(hash);
chunk_proof_base_to_next(&proof).map(|next_proof| (next_hash, next_proof))
})
.collect::<Result<Vec<(NextChunkHash, NextChunkProof)>>>()?;
let next_batch_proof = prover.borrow_mut().gen_agg_evm_proof(
next_chunk_hashes_proofs,
name,
output_dir,
)?;
return batch_proof_next_to_base(next_batch_proof);
}
unreachable!("please check errors in proof_type logic")
}
fn aggregator_check_chunk_proofs(&self, chunk_proofs: &[ChunkProof]) -> Result<bool> {
log::info!("[circuit handler], [next], [batch] check_chunk_proofs");
if let Some(prover) = self.batch_prover.as_ref() {
let next_chunk_proofs = chunk_proofs
.iter()
.map(chunk_proof_base_to_next)
.collect::<Result<Vec<NextChunkProof>>>()?;
return Ok(prover.borrow_mut().check_chunk_proofs(&next_chunk_proofs));
}
unreachable!("please check errors in proof_type logic")
}
}

View File

@@ -0,0 +1,69 @@
use anyhow::Result;
pub use prover::{BatchProof, BlockTrace, ChunkHash, ChunkProof, Proof};
// pub use prover_next::{
// BatchProof as NextBatchProof, BlockTrace as NextBlockTrace, ChunkHash as NextChunkHash,
// ChunkProof as NextChunkProof, Proof as NextProof,
// };
// pub fn chunk_proof_next_to_base(next: NextChunkProof) -> Result<ChunkProof> {
// let proof_bytes = serde_json::to_string(&next.proof)?;
// let proof: Proof = serde_json::from_str(&proof_bytes)?;
// let chunk_hash = next.chunk_hash.map(|hash| ChunkHash {
// chain_id: hash.chain_id,
// prev_state_root: hash.prev_state_root,
// post_state_root: hash.post_state_root,
// withdraw_root: hash.withdraw_root,
// data_hash: hash.data_hash,
// tx_bytes: hash.tx_bytes,
// is_padding: hash.is_padding,
// });
// Ok(ChunkProof {
// protocol: next.protocol,
// proof,
// chunk_hash,
// })
// }
// pub fn batch_proof_next_to_base(next: NextBatchProof) -> Result<BatchProof> {
// let proof_bytes = serde_json::to_string(&next)?;
// serde_json::from_str(&proof_bytes).map_err(|err| anyhow::anyhow!(err))
// }
// pub fn chunk_proof_base_to_next(base: &ChunkProof) -> Result<NextChunkProof> {
// let proof_bytes = serde_json::to_string(&base.proof)?;
// let proof: NextProof = serde_json::from_str(&proof_bytes)?;
// let chunk_hash = base.chunk_hash.clone().map(|hash| NextChunkHash {
// chain_id: hash.chain_id,
// prev_state_root: hash.prev_state_root,
// post_state_root: hash.post_state_root,
// withdraw_root: hash.withdraw_root,
// data_hash: hash.data_hash,
// tx_bytes: hash.tx_bytes,
// is_padding: hash.is_padding,
// });
// Ok(NextChunkProof {
// protocol: base.protocol.clone(),
// proof,
// chunk_hash,
// })
// }
// pub fn chunk_hash_base_to_next(base: ChunkHash) -> NextChunkHash {
// NextChunkHash {
// chain_id: base.chain_id,
// prev_state_root: base.prev_state_root,
// post_state_root: base.post_state_root,
// withdraw_root: base.withdraw_root,
// data_hash: base.data_hash,
// tx_bytes: base.tx_bytes,
// is_padding: base.is_padding,
// }
// }
// pub fn block_trace_base_to_next(base: BlockTrace) -> Result<NextBlockTrace> {
// let trace_bytes = serde_json::to_string(&base)?;
// serde_json::from_str(&trace_bytes).map_err(|err| anyhow::anyhow!(err))
// }
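// The commented-out helpers above all lean on one trick: the base and next
// crates' types are structurally identical, so a JSON round-trip bridges them
// without a field-by-field mapping. A self-contained sketch of the pattern
// (V1Point/V2Point are stand-ins, not types from either prover crate):
use serde::{de::DeserializeOwned, Serialize};
fn bridge<Src: Serialize, Dst: DeserializeOwned>(src: &Src) -> Result<Dst> {
let json = serde_json::to_string(src)?;
Ok(serde_json::from_str(&json)?)
}
#[derive(Serialize)]
struct V1Point { x: u64, y: u64 }
#[derive(serde::Deserialize)]
struct V2Point { x: u64, y: u64 }
#[allow(dead_code)]
fn demo() -> Result<()> {
let v2: V2Point = bridge(&V1Point { x: 1, y: 2 })?;
assert_eq!((v2.x, v2.y), (1, 2));
Ok(())
}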

View File

@@ -24,16 +24,16 @@ rollup_relayer: ## Builds the rollup_relayer bin
 test:
 	go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
-lint: ## Lint the files - used for CI
+lint: mock_abi ## Lint the files - used for CI
 	GOBIN=$(PWD)/build/bin go run ../build/lint.go
 clean: ## Empty out the bin folder
 	@rm -rf build/bin
 docker_push:
-	docker docker push scrolltech/gas-oracle:${IMAGE_VERSION}
-	docker docker push scrolltech/event-watcher:${IMAGE_VERSION}
-	docker docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
+	docker push scrolltech/gas-oracle:${IMAGE_VERSION}
+	docker push scrolltech/event-watcher:${IMAGE_VERSION}
+	docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
 docker:
 	DOCKER_BUILDKIT=1 docker build -t scrolltech/gas-oracle:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/gas_oracle.Dockerfile

View File

@@ -78,9 +78,15 @@ func action(ctx *cli.Context) error {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := utils.ReadGenesis(genesisPath)
if err != nil {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, genesis.Config, relayer.ServiceTypeL1GasOracle, registry)
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
}

View File

@@ -7,11 +7,11 @@ import (
"testing"
"time"
"scroll-tech/rollup/internal/config"
"scroll-tech/common/cmd"
"scroll-tech/common/testcontainers"
"scroll-tech/common/utils"
"scroll-tech/rollup/internal/config"
)
// MockApp mockApp-test client manager.
@@ -85,7 +85,7 @@ func (b *MockApp) MockConfig(store bool) error {
 		return err
 	}
-	l1GethEndpoint, err := b.testApps.GetL1GethEndPoint()
+	l1GethEndpoint, err := b.testApps.GetPoSL1EndPoint()
 	if err != nil {
 		return err
 	}

Some files were not shown because too many files have changed in this diff.