Compare commits


36 Commits

Author SHA1 Message Date
Péter Garamvölgyi
9262e9af69 Reapply "build(ci): make all intermediate images compatible with multi platforms (#1291)"
This reverts commit 5090b77655.
2024-04-28 11:25:10 +08:00
Péter Garamvölgyi
5090b77655 Revert "build(ci): make all intermediate images compatible with multi platforms (#1291)"
This reverts commit 8f8f6eb1a1.
2024-04-28 10:54:17 +08:00
colin
4cafc9349a feat(gas-oracle): tweak gas price update logic (#1305) 2024-04-28 10:47:04 +08:00
colin
ca6f856372 docs: remove local testing image Dockerfile and docs for mac M1/M2 silicon (#1300) 2024-04-25 08:32:57 +08:00
Mengran Lan
72ee087f35 fix(db): change prover_block_list's index on public_key from non-unique to unique (#1294)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2024-04-24 11:00:09 +08:00
sbaizet
8f8f6eb1a1 build(ci): make all intermediate images compatible with multi platforms (#1291) 2024-04-23 17:20:03 +08:00
Péter Garamvölgyi
c56bda9f47 feat(gas-oracle): relay blob base fee after Bernoulli block (#1293) 2024-04-23 16:53:39 +08:00
Péter Garamvölgyi
433d5c2f52 feat: publish rollup-db-cli image (#1280)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-22 20:19:15 +08:00
georgehao
69c0f7ed75 feat: upgrade gomonkey version (#1290) 2024-04-22 16:03:41 +08:00
colin
c25b827666 docs: add bridge-history deployment in readme (#1281)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-22 14:38:39 +08:00
georgehao
da4f6818e3 feat: upgrade scroll to go1.21 (#1285)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2024-04-22 14:35:50 +08:00
georgehao
200ca7c15b fix(coordinator): fix coordinator cron exit issue (#1286)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-22 09:26:53 +08:00
Mengran Lan
53cf26597d perf(db): add indexes for table prover_task, columns respectively: task_id, created_at (#1283) 2024-04-19 16:54:13 +08:00
sbaizet
f0f7341271 ci - Docker support for arm64 on all services (#1278) 2024-04-16 10:06:29 +02:00
sbaizet
b6af88c936 ci - Docker support for arm64 (#1276) 2024-04-15 16:18:25 +02:00
sbaizet
de541a650a ci - fix github action to support arm64 platform (#1275) 2024-04-15 15:35:18 +02:00
colin
d7a57235d3 fix(rollup-relayer): tweak logs (#1274)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-15 14:07:26 +08:00
sbaizet
91d21301ec ci: add support for arm64 on event-watcher images (#1269) 2024-04-14 16:27:59 +08:00
Daniel Helm
4b32a44a70 docs(coordinator): fix internal links to config files in README (#1272) 2024-04-14 16:27:17 +08:00
georgehao
55b400c5fb fix: rollup make lint failure (#1273) 2024-04-14 16:26:35 +08:00
JayLiu
1b49091207 test: fix testcontainers listen ports (#1270)
Co-authored-by: liuyuecai <liuyuecai1995@gmail.com>
2024-04-13 13:30:22 +08:00
HAOYUatHZ
5b827c3c18 feat(db): add batch_data_hash & blob metadata (#1221)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-12 15:45:06 +08:00
georgehao
6b2eb80aa5 feat: print version info on service startup (#1268)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-12 15:09:26 +08:00
Péter Garamvölgyi
71f88b04f5 fix: add blobHash to challenge and piHash (#1264)
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2024-04-12 11:00:37 +08:00
Zhang Zhuo
bcd9764bcd chore(libzkp): upgrade to v0.10.3 (#1267)
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2024-04-12 10:51:39 +08:00
georgehao
b4f8377a08 fix(coordinator): fix coordinator recover public key inconsistent (#1265)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2024-04-12 07:19:31 +08:00
Zhang Zhuo
b52d43caa8 chore(libzkp): upgrade to v0.10.2 (#1257)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2024-04-11 13:44:09 +08:00
Mengran Lan
201bf401cd feat(coordinator): add new metric get_task_counter tracking prover info (#1235)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-11 12:47:03 +08:00
georgehao
898ac1d25c feat: update batch/chunk proving status when finalize without proof (#1255)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-04-11 11:15:09 +08:00
Snoppy
1336b89fb8 chore: fix typos (#1244)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-04-11 11:02:32 +08:00
Zhang Zhuo
73045df037 feat(coordinator): support multiple batch verifier versions (#1249)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-11 10:55:58 +08:00
Sina Pilehchiha
b3093e9eb6 fix: Implemented fixes for 4844 support fix review. (#1256) 2024-04-10 06:05:29 -04:00
colin
3d5250e52d feat(bridge-history): add puffer gateways (#1252)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-09 19:34:03 +08:00
HAOYUatHZ
b7324c76bc fix(test,verifier): fix TestFFI (#1250) 2024-04-09 16:47:43 +08:00
Péter Garamvölgyi
6d6e98bd6e test(codecv1): add zkEVM standard challenge digest test vectors (#1213)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2024-04-08 11:31:41 +08:00
JayLiu
9e35ce0ab4 test: add testcontainers (#1248) 2024-04-08 00:10:14 +08:00
121 changed files with 1575 additions and 1197 deletions

View File

@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go

View File

@@ -33,13 +33,13 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
@@ -54,7 +54,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
@@ -95,7 +95,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc

View File

@@ -46,6 +46,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -90,6 +91,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -134,6 +136,52 @@ jobs:
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup-db-cli:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: rollup-db-cli
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: rollup-db-cli
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/db_cli.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -178,6 +226,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -222,6 +271,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -266,6 +316,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -310,6 +361,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-cron.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
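The `platforms:` lines added above turn each service image into a multi-arch build, with QEMU and Buildx supplying the arm64 emulation; note that coordinator-api stays `linux/amd64` only. To confirm a pushed tag really carries both platforms, one option is to inspect its manifest list. A minimal Go sketch that shells out to `docker buildx imagetools inspect` — the image tag is just one of the builder images referenced elsewhere in this compare, chosen for illustration:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Example tag; substitute any image pushed by the workflow above.
	image := "scrolltech/go-alpine-builder:1.21"

	// `docker buildx imagetools inspect` prints the manifest list,
	// including each platform (linux/amd64, linux/arm64) it was built for.
	out, err := exec.Command("docker", "buildx", "imagetools", "inspect", image).CombinedOutput()
	if err != nil {
		fmt.Println("inspect failed:", err)
	}
	fmt.Println(string(out))
}
```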

View File

@@ -7,12 +7,12 @@ on:
description: 'Go version'
required: true
type: string
default: '1.20'
default: '1.21'
RUST_VERSION:
description: 'Rust toolchain version'
required: true
type: string
default: 'nightly-2022-12-10'
default: 'nightly-2023-12-03'
PYTHON_VERSION:
description: 'Python version'
required: false
@@ -29,31 +29,191 @@ defaults:
working-directory: 'build/dockerfiles/intermediate'
jobs:
build-and-push:
build-and-publish-cuda-go-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build
run: |
make all
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Publish
run: |
make publish
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/cuda-go-rust-builder.Dockerfile
tags: scrolltech/cuda-go-rust-builder:cuda-${{ github.event.inputs.CUDA_VERSION }}-go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-alpine-builder:${{ github.event.inputs.GO_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-alpine-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-alpine-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-py-runner:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/py-runner.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/py-runner:${{ github.event.inputs.PYTHON_VERSION }}
build-args: |
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}

View File

@@ -50,7 +50,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go

View File

@@ -47,11 +47,5 @@ dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/
build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
docker build -t scroll_test_image -f ./build/dockerfiles/local_testing.Dockerfile $$(mktemp -d)
run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app -e HOST_PATH=$(PWD) scroll_test_image
clean: ## Empty out the bin folder
@rm -rf build/bin

View File

@@ -43,8 +43,6 @@ make dev_docker
## Testing Rollup & Coordinator
### For Non-Apple Silicon (M1/M2) Macs
Run the tests using the following commands:
```bash
@@ -54,39 +52,6 @@ go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
### For Apple Silicon (M1/M2) Macs
To run tests on Apple Silicon Macs, build and execute the Docker image as outlined below:
#### Build a Docker Image for Testing
Use the following command to build a Docker image:
```bash
make build_test_docker
```
This command builds a Docker image named `scroll_test_image` using the Dockerfile found at `./build/dockerfiles/local_testing.Dockerfile`.
#### Run Docker Image
After the image is built, run a Docker container from it:
```bash
make run_test_docker
```
This command runs a Docker container named `scroll_test_container` from the `scroll_test_image` image. The container uses the host network and has access to the Docker socket and the current directory.
Once the Docker container is running, execute the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
## Testing Contracts
You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).

View File

@@ -79,3 +79,50 @@ provides REST APIs. Please refer to the API details below.
// @Success 200
// @Router /api/txsbyhashes [post]
```
## Running bridge-history-api locally
1. Pull the latest Redis image:
```
docker pull redis:latest
```
2. Run the Redis container:
```
docker run --name bridgehistoryapi-redis -d -p 6379:6379 redis:latest
```
3. Pull the latest PostgreSQL image:
```
docker pull postgres:latest
```
4. Run the PostgreSQL container:
```
docker run --name bridgehistoryapi-history-db -p 5444:5432 -e POSTGRES_PASSWORD=123456 -e POSTGRES_DB=test -d postgres
```
5. Run database migrations to initialize the tables:
```
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli migrate
```
6. Run bridgehistoryapi-fetcher:
```
make bridgehistoryapi-fetcher
./build/bin/bridgehistoryapi-fetcher
```
7. Run bridgehistoryapi-api:
```
make bridgehistoryapi-api
./build/bin/bridgehistoryapi-api
```
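Once both services are up, a quick smoke test against the `/api/txsbyhashes` route documented above can confirm the API is serving. A minimal Go sketch — the listen port and the request-body field name are assumptions for illustration; check [./conf/config.json](./conf/config.json) for the actual values:
```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical payload shape; the "txs" field name is an assumption,
	// not confirmed by this README.
	payload := bytes.NewBufferString(`{"txs": ["0x0000000000000000000000000000000000000000000000000000000000000000"]}`)

	// Port 8080 is an assumption; read the real listen address from ./conf/config.json.
	resp, err := http.Post("http://localhost:8080/api/txsbyhashes", "application/json", payload)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```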
The endpoints provided in [./conf/config.json](./conf/config.json) are all public and rate-limited.
For production usage:
- For L1 endpoints, utilizing a service provider's free tier should suffice.
- For L2 endpoints, consider [running a Scroll L2geth node](https://docs.scroll.xyz/en/developers/guides/running-a-scroll-node) and using the exposed HTTP port.

View File

@@ -15,6 +15,7 @@
"USDCGatewayAddr": "0xf1AF3b23DE0A5Ca3CAb7261cb0061C0D779A5c7B",
"LIDOGatewayAddr": "0x6625C6332c9F91F2D27c304E729B86db87A3f504",
"DAIGatewayAddr": "0x67260A8B73C5B77B55c1805218A42A7A6F98F515",
"PufferGatewayAddr": "0xA033Ff09f2da45f0e9ae495f525363722Df42b2a",
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"
@@ -34,7 +35,9 @@
"USDCGatewayAddr": "0x33B60d5Dd260d453cAC3782b0bDC01ce84672142",
"LIDOGatewayAddr": "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9",
"DAIGatewayAddr": "0xaC78dff3A87b5b534e366A93E785a0ce8fA6Cc62",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79"
"PufferGatewayAddr": "0x9eBf2f33526CD571f8b2ad312492cb650870CFd6",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79",
"MessageQueueAddr": "0x5300000000000000000000000000000000000000"
},
"db": {
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",

View File

@@ -23,6 +23,7 @@ type FetcherConfig struct {
DAIGatewayAddr string `json:"DAIGatewayAddr"`
USDCGatewayAddr string `json:"USDCGatewayAddr"`
LIDOGatewayAddr string `json:"LIDOGatewayAddr"`
PufferGatewayAddr string `json:"PufferGatewayAddr"`
ERC721GatewayAddr string `json:"ERC721GatewayAddr"`
ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
ScrollChainAddr string `json:"ScrollChainAddr"`

View File

@@ -93,6 +93,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L1FetcherLogic{

View File

@@ -85,7 +85,12 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY ./bridge-history-api/go.* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as chef
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
WORKDIR app
FROM chef as planner
@@ -17,7 +17,7 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,8 +1,8 @@
ifeq ($(GO_VERSION),)
GO_VERSION=1.20
GO_VERSION=1.21
endif
ifeq ($(RUST_VERSION),)
RUST_VERSION=nightly-2022-12-10
RUST_VERSION=nightly-2023-12-03
endif
ifeq ($(PYTHON_VERSION),)
PYTHON_VERSION=3.10

View File

@@ -1,6 +1,6 @@
ARG CUDA_VERSION=11.7.1
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04

View File

@@ -1,4 +1,4 @@
ARG GO_VERSION=1.20
ARG GO_VERSION=1.21
FROM golang:${GO_VERSION}-alpine

View File

@@ -1,5 +1,5 @@
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM golang:${GO_VERSION}-alpine

View File

@@ -1,5 +1,5 @@
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM ubuntu:20.04

View File

@@ -1,5 +1,5 @@
ARG ALPINE_VERSION=3.15
ARG RUST_VERSION=nightly-2022-12-10
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM alpine:${ALPINE_VERSION}

View File

@@ -1,4 +1,4 @@
ARG RUST_VERSION=nightly-2022-12-10
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM ubuntu:20.04

View File

@@ -1,11 +0,0 @@
# Start from the latest golang base image
FROM golang:1.21
# Install Docker
RUN apt-get update && apt-get install -y docker.io docker-compose
# Set the working directory
WORKDIR /go/src/app
# This container will be executable
ENTRYPOINT [ "/bin/bash" ]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,196 +0,0 @@
package docker
import (
"crypto/rand"
"database/sql"
"encoding/json"
"fmt"
"math/big"
"os"
"testing"
"time"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/common/utils"
)
var (
l1StartPort = 10000
l2StartPort = 20000
dbStartPort = 30000
)
// AppAPI app interface.
type AppAPI interface {
IsRunning() bool
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
RunApp(waitResult func() bool)
WaitExit()
ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
}
// App is collection struct of runtime docker images
type App struct {
L1gethImg GethImgInstance
L2gethImg GethImgInstance
DBImg ImgInstance
dbClient *sql.DB
DBConfig *database.DBConfig
DBConfigFile string
// common time stamp.
Timestamp int
}
// NewDockerApp returns new instance of dockerApp struct
func NewDockerApp() *App {
timestamp := time.Now().Nanosecond()
app := &App{
Timestamp: timestamp,
L1gethImg: newTestL1Docker(),
L2gethImg: newTestL2Docker(),
DBImg: newTestDBDocker("postgres"),
DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
}
if err := app.mockDBConfig(); err != nil {
panic(err)
}
return app
}
// RunImages runs all images together
func (b *App) RunImages(t *testing.T) {
b.RunDBImage(t)
b.RunL1Geth(t)
b.RunL2Geth(t)
}
// RunDBImage starts postgres docker container.
func (b *App) RunDBImage(t *testing.T) {
if b.DBImg.IsRunning() {
return
}
assert.NoError(t, b.DBImg.Start())
// try 10 times until the db is ready.
ok := utils.TryTimes(10, func() bool {
db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
return err == nil && db != nil && db.Ping() == nil
})
assert.True(t, ok)
}
// Free clears all running images, double-checks and recycles docker containers.
func (b *App) Free() {
if b.L1gethImg.IsRunning() {
_ = b.L1gethImg.Stop()
}
if b.L2gethImg.IsRunning() {
_ = b.L2gethImg.Stop()
}
if b.DBImg.IsRunning() {
_ = b.DBImg.Stop()
_ = os.Remove(b.DBConfigFile)
if !utils.IsNil(b.dbClient) {
_ = b.dbClient.Close()
b.dbClient = nil
}
}
}
// RunL1Geth starts l1geth docker container.
func (b *App) RunL1Geth(t *testing.T) {
if b.L1gethImg.IsRunning() {
return
}
assert.NoError(t, b.L1gethImg.Start())
}
// L1Client returns a ethclient by dialing running l1geth
func (b *App) L1Client() (*ethclient.Client, error) {
if utils.IsNil(b.L1gethImg) {
return nil, fmt.Errorf("l1 geth is not running")
}
client, err := ethclient.Dial(b.L1gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
// RunL2Geth starts l2geth docker container.
func (b *App) RunL2Geth(t *testing.T) {
if b.L2gethImg.IsRunning() {
return
}
assert.NoError(t, b.L2gethImg.Start())
}
// L2Client returns a ethclient by dialing running l2geth
func (b *App) L2Client() (*ethclient.Client, error) {
if utils.IsNil(b.L2gethImg) {
return nil, fmt.Errorf("l2 geth is not running")
}
client, err := ethclient.Dial(b.L2gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
// DBClient create and return *sql.DB instance.
func (b *App) DBClient(t *testing.T) *sql.DB {
if !utils.IsNil(b.dbClient) {
return b.dbClient
}
var (
cfg = b.DBConfig
err error
)
b.dbClient, err = sql.Open(cfg.DriverName, cfg.DSN)
assert.NoError(t, err)
b.dbClient.SetMaxOpenConns(cfg.MaxOpenNum)
b.dbClient.SetMaxIdleConns(cfg.MaxIdleNum)
assert.NoError(t, b.dbClient.Ping())
return b.dbClient
}
func (b *App) mockDBConfig() error {
b.DBConfig = &database.DBConfig{
DSN: "",
DriverName: "postgres",
MaxOpenNum: 200,
MaxIdleNum: 20,
}
if b.DBImg != nil {
b.DBConfig.DSN = b.DBImg.Endpoint()
}
data, err := json.Marshal(b.DBConfig)
if err != nil {
return err
}
return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
}
func newTestL1Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
}
func newTestL2Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
}
func newTestDBDocker(driverName string) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
}

View File

@@ -1,131 +0,0 @@
package docker
import (
"context"
"fmt"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgDB the postgres image manager.
type ImgDB struct {
image string
name string
id string
dbName string
port int
password string
running bool
cmd *cmd.Cmd
}
// NewImgDB return postgres db img instance.
func NewImgDB(image, password, dbName string, port int) ImgInstance {
img := &ImgDB{
image: image,
name: fmt.Sprintf("%s-%s_%d", image, dbName, port),
password: password,
dbName: dbName,
port: port,
}
img.cmd = cmd.NewCmd("docker", img.prepare()...)
return img
}
// Start postgres db container.
func (i *ImgDB) Start() error {
id := GetContainerID(i.name)
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.running = i.isOk()
if !i.running {
_ = i.Stop()
return fmt.Errorf("failed to start image: %s", i.image)
}
return nil
}
// Stop the container.
func (i *ImgDB) Stop() error {
if !i.running {
return nil
}
i.running = false
ctx := context.Background()
// stop the running container.
if i.id == "" {
i.id = GetContainerID(i.name)
}
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
return err
}
// remove the stopped container.
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
}
// Endpoint return the dsn.
func (i *ImgDB) Endpoint() string {
return fmt.Sprintf("postgres://postgres:%s@localhost:%d/%s?sslmode=disable", i.password, i.port, i.dbName)
}
// IsRunning returns docker container's running status.
func (i *ImgDB) IsRunning() bool {
return i.running
}
func (i *ImgDB) prepare() []string {
cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
envs := []string{
"-e", "POSTGRES_PASSWORD=" + i.password,
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
}
cmd = append(cmd, envs...)
return append(cmd, i.image)
}
func (i *ImgDB) isOk() bool {
keyword := "database system is ready to accept connections"
okCh := make(chan struct{}, 1)
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer i.cmd.UnRegistFunc(keyword)
// Start cmd in parallel.
i.cmd.RunCmd(true)
select {
case <-okCh:
utils.TryTimes(20, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
case err := <-i.cmd.ErrChan:
if err != nil {
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
}
case <-time.After(time.Second * 20):
return false
}
return i.id != ""
}

View File

@@ -1,174 +0,0 @@
package docker
import (
"context"
"fmt"
"math/big"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/scroll-tech/go-ethereum/ethclient"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgGeth is the geth image manager, covering both l1geth and l2geth.
type ImgGeth struct {
image string
name string
id string
volume string
ipcPath string
httpPort int
wsPort int
chainID *big.Int
running bool
cmd *cmd.Cmd
}
// NewImgGeth return geth img instance.
func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
img := &ImgGeth{
image: image,
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
volume: volume,
ipcPath: ipc,
httpPort: hPort,
wsPort: wPort,
}
img.cmd = cmd.NewCmd("docker", img.params()...)
return img
}
// Start run image and check if it is running healthily.
func (i *ImgGeth) Start() error {
id := GetContainerID(i.name)
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.running = i.isOk()
if !i.running {
_ = i.Stop()
return fmt.Errorf("failed to start image: %s", i.image)
}
// try 10 times to get chainID until it is ok.
utils.TryTimes(10, func() bool {
client, err := ethclient.Dial(i.Endpoint())
if err == nil && client != nil {
i.chainID, err = client.ChainID(context.Background())
return err == nil && i.chainID != nil
}
return false
})
return nil
}
// IsRunning returns docker container's running status.
func (i *ImgGeth) IsRunning() bool {
return i.running
}
// Endpoint return the connection endpoint.
func (i *ImgGeth) Endpoint() string {
switch true {
case i.httpPort != 0:
return fmt.Sprintf("http://127.0.0.1:%d", i.httpPort)
case i.wsPort != 0:
return fmt.Sprintf("ws://127.0.0.1:%d", i.wsPort)
default:
return i.ipcPath
}
}
// ChainID return chainID.
func (i *ImgGeth) ChainID() *big.Int {
return i.chainID
}
func (i *ImgGeth) isOk() bool {
keyword := "WebSocket enabled"
okCh := make(chan struct{}, 1)
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer i.cmd.UnRegistFunc(keyword)
// Start cmd in parallel.
i.cmd.RunCmd(true)
select {
case <-okCh:
utils.TryTimes(20, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
case err := <-i.cmd.ErrChan:
if err != nil {
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
}
case <-time.After(time.Second * 10):
return false
}
return i.id != ""
}
// Stop the docker container.
func (i *ImgGeth) Stop() error {
if !i.running {
return nil
}
i.running = false
ctx := context.Background()
// check if container is running, stop the running container.
id := GetContainerID(i.name)
if id != "" {
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, id, timeout); err != nil {
return err
}
i.id = id
}
// remove the stopped container.
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
}
func (i *ImgGeth) params() []string {
cmds := []string{"run", "--rm", "--name", i.name}
var ports []string
if i.httpPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)
}
if i.wsPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.wsPort) + ":8546"}...)
}
var envs []string
if i.ipcPath != "" {
envs = append(envs, []string{"-e", fmt.Sprintf("IPC_PATH=%s", i.ipcPath)}...)
}
if i.volume != "" {
cmds = append(cmds, []string{"-v", fmt.Sprintf("%s:%s", i.volume, i.volume)}...)
}
cmds = append(cmds, ports...)
cmds = append(cmds, envs...)
return append(cmds, i.image)
}

View File

@@ -1,53 +0,0 @@
package docker_test
import (
"context"
"testing"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
)
var (
base *docker.App
)
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
m.Run()
base.Free()
}
func TestDB(t *testing.T) {
base.RunDBImage(t)
db, err := sqlx.Open("postgres", base.DBImg.Endpoint())
assert.NoError(t, err)
assert.NoError(t, db.Ping())
}
func TestL1Geth(t *testing.T) {
base.RunL1Geth(t)
client, err := base.L1Client()
assert.NoError(t, err)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}
func TestL2Geth(t *testing.T) {
base.RunL2Geth(t)
client, err := base.L2Client()
assert.NoError(t, err)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}

View File

@@ -9,7 +9,6 @@ require (
github.com/docker/docker v25.0.3+incompatible
github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/gin v1.9.1
github.com/jmoiron/sqlx v1.3.5
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
github.com/modern-go/reflect2 v1.0.2
@@ -144,7 +143,6 @@ require (
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect

View File

@@ -268,7 +268,6 @@ github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXS
github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24=
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -400,8 +399,6 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -443,7 +440,6 @@ github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ic
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
@@ -469,9 +465,6 @@ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=

View File

@@ -31,7 +31,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"ark-std 0.3.0",
"c-kzg",
@@ -521,7 +521,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"eth-types",
"ethers-core",
@@ -535,7 +535,7 @@ dependencies = [
"mock",
"mpt-zktrie",
"num",
"poseidon-circuit",
"poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
"rand",
"revm-precompile",
"serde",
@@ -1139,7 +1139,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"base64 0.13.1",
"ethers-core",
@@ -1150,7 +1150,7 @@ dependencies = [
"itertools 0.11.0",
"num",
"num-bigint",
"poseidon-circuit",
"poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
"regex",
"serde",
"serde_json",
@@ -1293,7 +1293,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"eth-types",
"geth-utils",
@@ -1485,7 +1485,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"eth-types",
"halo2_proofs",
@@ -1507,7 +1507,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"env_logger 0.10.0",
"gobuild",
@@ -1684,7 +1684,7 @@ dependencies = [
"log",
"num-bigint",
"num-traits",
"poseidon-circuit",
"poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=scroll-dev-1201)",
"rand",
"rand_chacha",
"serde",
@@ -2142,7 +2142,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"env_logger 0.10.0",
"eth-types",
@@ -2292,7 +2292,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"eth-types",
"ethers-core",
@@ -2307,7 +2307,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"eth-types",
"halo2-mpt-circuits",
@@ -2315,7 +2315,7 @@ dependencies = [
"hex",
"log",
"num-bigint",
"poseidon-circuit",
"poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
"zktrie",
]
@@ -2671,6 +2671,21 @@ dependencies = [
"subtle",
]
[[package]]
name = "poseidon-circuit"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#babf5f6a69bec40b2e6523df317c073dcd0b1f97"
dependencies = [
"bitvec",
"ff 0.13.0",
"halo2_proofs",
"lazy_static",
"log",
"rand",
"rand_xorshift",
"thiserror",
]
[[package]]
name = "poseidon-circuit"
version = "0.1.0"
@@ -2754,7 +2769,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"aggregator",
"anyhow",
@@ -4441,7 +4456,7 @@ dependencies = [
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"array-init",
"bus-mapping",
@@ -4465,7 +4480,7 @@ dependencies = [
"mpt-zktrie",
"num",
"num-bigint",
"poseidon-circuit",
"poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
"rand",
"rand_chacha",
"rand_xorshift",
@@ -4494,6 +4509,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"snark-verifier-sdk",
]
[[package]]

View File

@@ -24,7 +24,8 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i
[dependencies]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.0rc3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
base64 = "0.13.0"
env_logger = "0.9.0"

View File

@@ -12,6 +12,7 @@ use prover::{
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, BlockTrace, ChunkHash, ChunkProof,
};
use snark_verifier_sdk::verify_evm_calldata;
use std::{cell::OnceCell, env, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -148,11 +149,33 @@ pub unsafe extern "C" fn gen_batch_proof(
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char {
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
let fork_name_str = c_char_to_str(fork_name);
let fork_id = match fork_name_str {
"" => 0,
"shanghai" => 0,
"bernoulli" => 1,
_ => {
log::warn!("unexpected fork_name {fork_name_str}, treated as bernoulli");
1
}
};
let verified = panic_catch(|| {
if fork_id == 0 {
// before upgrade#2(EIP4844)
verify_evm_calldata(
include_bytes!("evm_verifier_fork_1.bin").to_vec(),
proof.calldata(),
)
} else {
VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
}
});
verified.unwrap_or(false) as c_char
}
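The dispatch above keys the verifier on a hard fork name. A small Go restatement of the same mapping — a hypothetical helper mirroring the Rust match arms, where pre-EIP4844 forks select the embedded calldata verifier (fork id 0) and anything unknown falls back to bernoulli (fork id 1):

```go
package main

import "fmt"

// forkID mirrors the match in verify_batch_proof: "" and "shanghai" are
// pre-upgrade#2 (EIP4844) and map to 0; "bernoulli" and, per the Rust
// warning, any unexpected name map to 1.
func forkID(forkName string) int {
	switch forkName {
	case "", "shanghai":
		return 0
	case "bernoulli":
		return 1
	default:
		fmt.Printf("unexpected fork_name %q, treated as bernoulli\n", forkName)
		return 1
	}
}

func main() {
	for _, name := range []string{"", "shanghai", "bernoulli", "unknown"} {
		fmt.Printf("%q -> %d\n", name, forkID(name))
	}
}
```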

Binary file not shown.

View File

@@ -3,7 +3,7 @@ void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char* check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof);
char verify_batch_proof(char* proof, char* fork_name);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);

View File

@@ -61,8 +61,11 @@ func (t *TestcontainerApps) StartL1GethContainer() error {
req := testcontainers.ContainerRequest{
Image: "scroll_l1geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
Cmd: []string{"--log.debug", "ANY"},
WaitingFor: wait.ForAll(
wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
),
Cmd: []string{"--log.debug", "ANY"},
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
@@ -85,7 +88,10 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
req := testcontainers.ContainerRequest{
Image: "scroll_l2geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
WaitingFor: wait.ForAll(
wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
),
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
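Shown in isolation, the new wait strategy treats the container as ready only once both RPC ports accept TCP connections, instead of polling HTTP on 8545 alone. A minimal sketch using the same testcontainers-go calls as the diff (it needs a local Docker daemon and the `scroll_l2geth` image to actually run):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
)

func main() {
	ctx := context.Background()
	req := testcontainers.ContainerRequest{
		Image:        "scroll_l2geth",
		ExposedPorts: []string{"8546/tcp", "8545/tcp"},
		// Ready only when both the WS (8546) and HTTP (8545) ports are listening.
		WaitingFor: wait.ForAll(
			wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
			wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
		),
	}
	container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
	if err != nil {
		fmt.Println("failed to start container:", err)
		return
	}
	defer container.Terminate(ctx)
	fmt.Println("l2geth container is ready")
}
```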

View File

@@ -227,18 +227,11 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
}
// blob payload
blob, z, err := constructBlobPayload(batch.Chunks)
blob, blobVersionedHash, z, err := constructBlobPayload(batch.Chunks)
if err != nil {
return nil, err
}
// blob versioned hash
c, err := kzg4844.BlobToCommitment(*blob)
if err != nil {
return nil, fmt.Errorf("failed to create blob commitment")
}
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
daBatch := DABatch{
Version: CodecV1Version,
BatchIndex: batch.Index,
@@ -281,7 +274,7 @@ func computeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore u
}
// constructBlobPayload constructs the 4844 blob payload.
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) {
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
metadataLength := 2 + MaxNumChunks*4
@@ -289,8 +282,8 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
blobBytes := make([]byte, metadataLength)
// challenge digest preimage
// 1 hash for metadata and 1 for each chunk
challengePreimage := make([]byte, (1+MaxNumChunks)*32)
// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
challengePreimage := make([]byte, (1+MaxNumChunks+1)*32)
// the chunk data hash used for calculating the challenge preimage
var chunkDataHash common.Hash
@@ -309,7 +302,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
// encode L2 txs into blob payload
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
if err != nil {
return nil, nil, err
return nil, common.Hash{}, nil, err
}
blobBytes = append(blobBytes, rlpTxData...)
}
@@ -341,9 +334,19 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
// convert raw data to BLSFieldElements
blob, err := makeBlobCanonical(blobBytes)
if err != nil {
return nil, nil, err
return nil, common.Hash{}, nil, err
}
// compute blob versioned hash
c, err := kzg4844.BlobToCommitment(*blob)
if err != nil {
return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
}
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
// challenge: append blob versioned hash
copy(challengePreimage[(1+MaxNumChunks)*32:], blobVersionedHash[:])
// compute z = challenge_digest % BLS_MODULUS
challengeDigest := crypto.Keccak256Hash(challengePreimage)
pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
@@ -354,7 +357,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
start := 32 - len(pointBytes)
copy(z[start:], pointBytes)
return blob, &z, nil
return blob, blobVersionedHash, &z, nil
}
// makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
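The new steps above fold the blob versioned hash into the challenge preimage and then reduce the keccak256 digest into the BLS12-381 scalar field, left-padding the result back to 32 bytes. A self-contained sketch of that final arithmetic — the modulus literal is the standard BLS12-381 scalar field order, assumed to match the codec's `BLSModulus`, and the digest is a placeholder rather than a real challenge digest:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// BLS12-381 scalar field order (assumed value of the codec's BLSModulus).
	blsModulus, _ := new(big.Int).SetString(
		"52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)

	// Placeholder for crypto.Keccak256Hash(challengePreimage) from the diff.
	digest := make([]byte, 32)
	for i := range digest {
		digest[i] = 0xab
	}

	// z = challenge_digest % BLS_MODULUS
	pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(digest), blsModulus)
	pointBytes := pointBigInt.Bytes()

	// Left-pad to 32 bytes, as in copy(z[start:], pointBytes).
	var z [32]byte
	copy(z[32-len(pointBytes):], pointBytes)
	fmt.Printf("z = %x\n", z)
}
```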

View File

@@ -10,7 +10,9 @@ import (
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/stretchr/testify/assert"
)
@@ -477,55 +479,125 @@ func TestCodecV1BatchChallenge(t *testing.T) {
originalBatch := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch, err := NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b36", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef", hex.EncodeToString(batch.z[:]))
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c99294", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e58", hex.EncodeToString(batch.z[:]))
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8", hex.EncodeToString(batch.z[:]))
trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk5}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk6}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk7}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
// 15 chunks
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b", hex.EncodeToString(batch.z[:]))
chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk8, chunk9}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c43", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab", hex.EncodeToString(batch.z[:]))
}
func repeat(element byte, count int) string {
result := make([]byte, 0, count)
for i := 0; i < count; i++ {
result = append(result, element)
}
return "0x" + common.Bytes2Hex(result)
}
func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
nRowsData := 126914
for _, tc := range []struct {
chunks [][]string
expectedz string
expectedy string
}{
// single empty chunk
{chunks: [][]string{{}}, expectedz: "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", expectedy: "304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd08"},
// single non-empty chunk
{chunks: [][]string{{"0x010203"}}, expectedz: "1c1d4bd5153f877d799853080aba243f2c186dd6d6064eaefacfe715c92b6354", expectedy: "24e80ed99526b0d15ba46f7ec682f517576ddae68d5131e5d351f8bae06ea7d3"},
// multiple empty chunks
{chunks: [][]string{{}, {}}, expectedz: "152c9ccfcc2884f9891f7adce2de110cf9f85bfd0e21f0933ae0636390a84d41", expectedy: "5f6f532676e25b49e2eae77513fbeca173a300b434c0a5e24fa554b68e27d582"},
// multiple non-empty chunks
{chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "62100f5381179ea7db7aa8fdedb0f7fc7b82730b75432d50ab41f80aeebe45a3", expectedy: "5b1f6e7a54907ddc06871853cf1f5d53bf2de0df7b61d0df84bc2c3fb80320cd"},
// empty chunk followed by non-empty chunk
{chunks: [][]string{{}, {"0x010203"}}, expectedz: "2d94d241c4a2a8d8f02845ca40cfba344f3b42384af2045a75c82e725a184232", expectedy: "302416c177e9e7fe40c3bc4315066c117e27d246b0a33ef68cdda6dd333c485c"},
// non-empty chunk followed by empty chunk
{chunks: [][]string{{"0x070809"}, {}}, expectedz: "7227567e3b1dbacb48a32bb85e4e99f73e4bd5620ea8cd4f5ac00a364c86af9c", expectedy: "2eb3dfd28362f35f562f779e749a555d2f1f87ddc716e95f04133d25189a391c"},
// max number of chunks all empty
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "1128ac3e22ced6af85be4335e0d03a266946a7cade8047e7fc59d6c8be642321", expectedy: "2d9b16422ce17f328fd00c99349768f0cb0c8648115eb3bd9b7864617ba88059"},
// max number of chunks all non-empty
{chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1a4025a3d74e70b511007dd55a2e252478c48054c6383285e8a176f33d99853b", expectedy: "12071ac2571c11220432a27b8be549392892e9baf4c654748ca206def3843940"},
// single chunk blob full
{chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "72714cc4a0ca75cee2d543b1f958e3d3dd59ac7df0d9d5617d8117b65295a5f2", expectedy: "4ebb690362bcbc42321309c210c99f2ebdb53b3fcf7cf3b17b78f6cfd1203ed3"},
// multiple chunks blob full
{chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "70eb5b4db503e59413238eef451871c5d12f2bb96c8b96ceca012f4ca0114727", expectedy: "568d0aaf280ec83f9c81ed2d80ecbdf199bd72dafb8a350007d37ea82997e455"},
// max number of chunks only last one non-empty not full blob
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "03db68ae16ee88489d52db19e6111b25630c5f23ad7cd14530aacf0cd231d476", expectedy: "24527d0b0e93b3dec0060c7b128975a8088b3104d3a297dc807ab43862a77a1a"},
// max number of chunks only last one non-empty full blob
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "677670193f73db499cede572bcb55677f0d2f13d690f9a820bd00bf584c3c241", expectedy: "1d85677f172dbdf4ad3094a17deeb1df4d7d2b7f35ecea44aebffa757811a268"},
// max number of chunks but last is empty
{chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "22935042dfe7df771b02c1f5cababfe508869e8f6339dabe25a8a32e37728bb0", expectedy: "48ca66fb5a094401728c3a6a517ffbd72c4d4d9a8c907e2d2f1320812f4d856f"},
} {
chunks := []*encoding.Chunk{}
for _, c := range tc.chunks {
block := &encoding.Block{Transactions: []*types.TransactionData{}}
for _, data := range c {
tx := &types.TransactionData{Type: 0xff, Data: data}
block.Transactions = append(block.Transactions, tx)
}
chunk := &encoding.Chunk{Blocks: []*encoding.Block{block}}
chunks = append(chunks, chunk)
}
b, _, z, err := constructBlobPayload(chunks)
assert.NoError(t, err)
actualZ := hex.EncodeToString(z[:])
assert.Equal(t, tc.expectedz, actualZ)
_, y, err := kzg4844.ComputeProof(*b, *z)
assert.NoError(t, err)
actualY := hex.EncodeToString(y[:])
assert.Equal(t, tc.expectedy, actualY)
}
}
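
As a sanity check, the (z, y) pair computed here can be verified against a commitment to the same blob; a hedged sketch using the same by-value kzg4844 calls as above (the BlobToCommitment/VerifyProof signatures are assumed from this go-ethereum fork):

commitment, err := kzg4844.BlobToCommitment(*b)
assert.NoError(t, err)
proof, y, err := kzg4844.ComputeProof(*b, *z)
assert.NoError(t, err)
// VerifyProof returns nil iff the proof opens the commitment to y at point z.
assert.NoError(t, kzg4844.VerifyProof(commitment, *z, y, proof))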
func TestCodecV1BatchBlobDataProof(t *testing.T) {
@@ -536,7 +608,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err := batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b363d27683f7aab53cf071e2c8c8f3abfe750d206c048489450d120679cdc823f7db44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607a30dad96431f70551dd950c1426131d73ccea6d050d38dea123aad90aa8c0b734c98e8e04bd8ea8f19b415f2d85156d8", hex.EncodeToString(verifyData))
assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef423dc493f1dd7c9fbecdffa021ca4649b13e8d72231487034ec6b27e155ecfd7b44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607b38542ec811c92d86ff6f3319de06ee60c42655278ccf874f3615f450de730895276828b73db03c553b0bc7e5474a5e0", hex.EncodeToString(verifyData))
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
@@ -545,7 +617,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c992946dc7e51a42a31f429bc1f321dcf020b9a661225259522dba186fcfe5dc012191b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e86a0b3c76e33edb24eb07faeaa5d3f2b15a55df6ab99abf828b5803f5681dc634602eb7469ee0556563b2eccebf16ec822", hex.EncodeToString(verifyData))
assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e5821975f318babe50be728f9b52754d5ce2caa2ba82ba35b5888af1c5f28d23206b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e868e0c6fe7bd39baa5ee6339cd334a18af7c680d24e825262499e83b31633b13a9ee89813fae8441630c82bc9dce3f1e07", hex.EncodeToString(verifyData))
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
@@ -554,7 +626,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a24164e6ea8b7946ce5e40d2baa4f6aa0d030076f6074295288133c00e75dafa2afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d676248f5ca4a9f0d9b7fa48f5f649dc84e928161fd99ad1bd9a9879b05d29c5f718bfb3b0a696a5f3ed50b5b8c6a9d530b3ee", hex.EncodeToString(verifyData))
assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8088a01e54e3565d2e91ce6afbadf479330847d9106737875303ce17f17c48722afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d67624aee03a0f7cdb7807bc7e0b9fb20bc299af2a35e38cda816708b40f2f18db491e14a0f5d9cfe2f4c12e4ca1a219484f17", hex.EncodeToString(verifyData))
trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -563,7 +635,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
@@ -572,7 +644,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
@@ -581,7 +653,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
// 15 chunks
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
@@ -589,7 +661,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2613ca15d051a539e3b239027f9bdbd03bd3c66c98afafb674e2a7441912cbe099743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc93291fbc65cfa558e4df12bcde442483d31072000c56f94fe012285bc5832eaee5fe1d47f1e8655539c4500f66207d8edc6", hex.EncodeToString(verifyData))
assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b54d28f1479467d8b97fb99f5257d3e5d63a81cb2d60e3564fe6ec6066a311c119743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc932a0b603cc94be2007d4b3b17af06b4fb30caf0e574d5abcfc5654079e65154679afad75844396082a7200a4e82462aeed", hex.EncodeToString(verifyData))
chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -598,7 +670,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c4330b3863672052b3d6c6552d121b0b13f97659f49bbfb6d7fed6e4b7076e4a43383bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fdae9cb71d402cfe8bc4d659f228c41f0b9d195c5074278a2346204cfaa336f5de2244a3d53e0effa2f49c81924720e84e", hex.EncodeToString(verifyData))
assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab35b73ddb4a78fc4a8540f1d8259512c46e606a701e7ef7742e38cc4562ef53b983bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fda28e5610ca6b185d6ac30b53bd83d6366fccb1956daafa90ff6b504a966b119ebb45cb3f7085b7c1d622ee1ad27fcff9", hex.EncodeToString(verifyData))
}
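
Note that each expected proof string begins with the batch's z followed by the claimed evaluation y: compare the first assert here against the first assert in TestCodecV1BatchChallenge, or the chunk5/chunk6/chunk7 cases against the single-empty-chunk expectedz/expectedy in the standard test cases above. A sketch of the apparent layout (the tail widths are an assumption, presumably the KZG commitment and proof):

z := verifyData[0:32]   // evaluation point; matches batch.z in the challenge test
y := verifyData[32:64]  // claimed blob value at z
tail := verifyData[64:] // assumption: KZG commitment and proof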
func TestCodecV1BatchSkipBitmap(t *testing.T) {

View File

@@ -6,6 +6,7 @@ import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
)
// CodecVersion defines the version of encoder and decoder.
@@ -17,8 +18,18 @@ const (
// CodecV1 represents the version 1 of the encoder and decoder.
CodecV1
// txTypeTest is a special transaction type used in unit tests.
txTypeTest = 0xff
)
func init() {
// make sure txTypeTest will not interfere with other transaction types
if txTypeTest == types.LegacyTxType || txTypeTest == types.AccessListTxType || txTypeTest == types.DynamicFeeTxType || txTypeTest == types.BlobTxType || txTypeTest == types.L1MessageTxType {
log.Crit("txTypeTest is overlapping with existing transaction types")
}
}
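Since log.Crit logs at critical level and then exits the process, a collision between txTypeTest and any real transaction type fails fast at package initialization.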
// Block represents an L2 block.
type Block struct {
Header *types.Header
@@ -134,6 +145,10 @@ func ConvertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
S: txData.S.ToInt(),
})
case txTypeTest:
// in the tests, we simply use `data` as the RLP-encoded transaction
return data, nil
case types.L1MessageTxType: // L1MessageTxType is not supported
default:
return nil, fmt.Errorf("unsupported tx type: %d", txData.Type)

View File

@@ -0,0 +1,91 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// AuthMsg is the first message sent from the Prover to the Sequencer.
// It effectively acts as a registration and makes the Prover's identity
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
// HardForkName the hard fork name
HardForkName string `json:"hard_fork_name"`
}
// SignWithKey signs the auth message with the given private key and stores the hex-encoded signature on the message.
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the signature of the auth message.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey recovers the prover's public key from the signature.
func (a *AuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
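
A hedged usage sketch of the fork-aware auth flow (illustrative values, not part of the diff; assumes fmt alongside the imports above):

priv, err := crypto.GenerateKey()
if err != nil {
	panic(err)
}
authMsg := &AuthMsg{
	Identity: &Identity{
		ProverName:    "prover-0",       // illustrative
		ProverVersion: "v4.4.2",         // illustrative
		Challenge:     "some-challenge", // illustrative
		HardForkName:  "bernoulli",
	},
}
if err := authMsg.SignWithKey(priv); err != nil { // fills in Signature (hex)
	panic(err)
}
ok, _ := authMsg.Verify()    // true for a well-formed signature
pk, _ := authMsg.PublicKey() // compressed public key, hex-encoded
fmt.Println(ok, pk)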

View File

@@ -0,0 +1,89 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// LegacyAuthMsg is the old auth message sent from the Prover to the Sequencer.
// It effectively acts as a registration and makes the Prover's identity
// known to the Sequencer.
type LegacyAuthMsg struct {
// Message fields
Identity *LegacyIdentity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// LegacyIdentity contains all the fields to be signed by the prover.
type LegacyIdentity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
// SignWithKey signs the auth message with the given private key and stores the hex-encoded signature on the message.
func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the signature of the auth message.
func (a *LegacyAuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey recovers the prover's public key from the signature.
func (a *LegacyAuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *LegacyIdentity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
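LegacyIdentity carries exactly the fields of the removed Identity below (ProverName, ProverVersion, Challenge), so signatures produced by pre-fork provers keep the same RLP encoding and still verify.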

View File

@@ -58,26 +58,6 @@ const (
ProofTypeBatch
)
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
// GenerateToken generates a random token
func GenerateToken() (string, error) {
b := make([]byte, 16)
@@ -87,65 +67,6 @@ func GenerateToken() (string, error) {
return hex.EncodeToString(b), nil
}
// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey return public key from signature
func (a *AuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
*ProofDetail `json:"zkProof"`

View File

@@ -54,7 +54,7 @@ func TestIdentityHash(t *testing.T) {
hash, err := identity.Hash()
assert.NoError(t, err)
expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721"
expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
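The expected hash changes because Identity now RLP-encodes the additional HardForkName field, so the Keccak digest over the encoding differs from the legacy layout.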

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.3.84"
var tag = "v4.4.2"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -5,7 +5,7 @@
"license": "MIT",
"scripts": {
"test:hardhat": "npx hardhat test",
"test:forge": "forge test -vvv",
"test:forge": "forge test -vvv --evm-version cancun",
"test": "yarn test:hardhat && yarn test:forge",
"solhint": "./node_modules/.bin/solhint -f table 'src/**/*.sol'",
"lint:sol": "./node_modules/.bin/prettier --write 'src/**/*.sol'",

View File

@@ -2,6 +2,8 @@
pragma solidity ^0.8.24;
/// @title IScrollChain
/// @notice The interface for ScrollChain.
interface IScrollChain {
/**********
* Events *
@@ -43,23 +45,23 @@ interface IScrollChain {
* Public View Functions *
*************************/
/// @notice The latest finalized batch index.
/// @return The latest finalized batch index.
function lastFinalizedBatchIndex() external view returns (uint256);
/// @notice Return the batch hash of a committed batch.
/// @param batchIndex The index of the batch.
/// @return The batch hash of a committed batch.
function committedBatches(uint256 batchIndex) external view returns (bytes32);
/// @notice Return the state root of a committed batch.
/// @param batchIndex The index of the batch.
/// @return The state root of a committed batch.
function finalizedStateRoots(uint256 batchIndex) external view returns (bytes32);
/// @notice Return the message root of a committed batch.
/// @param batchIndex The index of the batch.
/// @return The message root of a committed batch.
function withdrawRoots(uint256 batchIndex) external view returns (bytes32);
/// @notice Return whether the batch is finalized by batch index.
/// @param batchIndex The index of the batch.
/// @return Whether the batch is finalized by batch index.
function isBatchFinalized(uint256 batchIndex) external view returns (bool);
/*****************************

View File

@@ -8,6 +8,8 @@ import {IScrollChain} from "./IScrollChain.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";
/// @title MultipleVersionRollupVerifier
/// @notice Verifies aggregate zk proofs using the appropriate verifier.
contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/**********
* Events *
@@ -37,7 +39,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
*************/
/// @notice The address of ScrollChain contract.
address immutable scrollChain;
address public immutable scrollChain;
/***********
* Structs *
@@ -58,7 +60,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/// The verifiers are sorted by batchIndex in increasing order.
mapping(uint256 => Verifier[]) public legacyVerifiers;
/// @notice Mapping from verifier version to the lastest used zkevm verifier.
/// @notice Mapping from verifier version to the latest used zkevm verifier.
mapping(uint256 => Verifier) public latestVerifier;
/***************
@@ -86,6 +88,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
*************************/
/// @notice Return the number of legacy verifiers.
/// @param _version The version of legacy verifiers.
/// @return The number of legacy verifiers.
function legacyVerifiersLength(uint256 _version) external view returns (uint256) {
return legacyVerifiers[_version].length;
}
@@ -93,6 +97,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/// @notice Compute the verifier that should be used for a specific batch.
/// @param _version The version of the verifier to query.
/// @param _batchIndex The batch index to query.
/// @return The address of the verifier.
function getVerifier(uint256 _version, uint256 _batchIndex) public view returns (address) {
// Normally, we will use the latest verifier.
Verifier memory _verifier = latestVerifier[_version];
@@ -144,6 +149,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
************************/
/// @notice Update the address of zkevm verifier.
/// @param _version The version of the verifier.
/// @param _startBatchIndex The start batch index when the verifier will be used.
/// @param _verifier The address of new verifier.
function updateVerifier(

View File

@@ -115,11 +115,11 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
*************/
/// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
address private constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
/// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
/// point evaluation precompile
uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
/// @notice The chain id of the corresponding layer 2 chain.
uint64 public immutable layer2ChainId;
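
For context, the point evaluation precompile at 0x0A takes a 192-byte input laid out per EIP-4844 and, on success, returns the field-element count and the BLS modulus above. A hedged Go-side sketch of assembling that input (the helper is illustrative, not part of this change):

// input = versioned_hash (32) | z (32) | y (32) | commitment (48) | proof (48)
func pointEvaluationInput(versionedHash, z, y [32]byte, commitment, proof [48]byte) []byte {
	input := make([]byte, 0, 192)
	input = append(input, versionedHash[:]...)
	input = append(input, z[:]...)
	input = append(input, y[:]...)
	input = append(input, commitment[:]...)
	input = append(input, proof[:]...)
	return input
}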
@@ -236,6 +236,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
*****************************/
/// @notice Import layer 2 genesis block
/// @param _batchHeader The header of the genesis batch.
/// @param _stateRoot The state root of the genesis block.
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
// check genesis batch header length
if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();
@@ -475,7 +477,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
_postStateRoot,
_withdrawRoot,
_dataHash,
_blobDataProof[0:64]
_blobDataProof[0:64],
_blobVersionedHash
)
);
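Folding _blobVersionedHash into the public-input preimage mirrors the codec change above, where constructBlobPayload now also returns the blob's versioned hash: the batch proof is thereby bound to the exact blob that was committed.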

View File

@@ -2,6 +2,8 @@
pragma solidity ^0.8.24;
/// @title IRollupVerifier
/// @notice The interface for rollup verifier.
interface IRollupVerifier {
/// @notice Verify aggregate zk proof.
/// @param batchIndex The batch index to verify.

View File

@@ -199,7 +199,7 @@ library PatriciaMerkleTrieVerifier {
}
// decodes all RLP encoded data and stores their DATA items
// [length - 128 bits | calldata offset - 128 bits] in a continous memory region.
// [length - 128 bits | calldata offset - 128 bits] in a continuous memory region.
// Expects that the RLP starts with a list that defines the length
// of the whole RLP region.
function decodeFlat(_ptr) -> ptr, memStart, nItems, hash {
@@ -505,7 +505,7 @@ library PatriciaMerkleTrieVerifier {
}
// the one and only boundary check
// in case an attacker crafted a malicous payload
// in case an attacker crafted a malicious payload
// and succeeds in the prior verification steps
// then this should catch any bogus accesses
if iszero(eq(ptr, add(proof.offset, proof.length))) {

View File

@@ -83,6 +83,8 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
}
function testTransferUSDCRoles(address owner) external {
hevm.assume(owner != address(0));
// a call from a non-whitelisted caller should revert
hevm.expectRevert("only circle caller");
gateway.transferUSDCRoles(owner);

View File

@@ -38,7 +38,7 @@ make lint
## Configure
The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManager` in [`config/config.go`](config/config.go) for more details.
The coordinator behavior can be configured using [`conf/config.json`](conf/config.json). Check the code comments under `ProverManager` in [`internal/config/config.go`](internal/config/config.go) for more details.
## Start

View File

@@ -13,7 +13,6 @@ import (
"github.com/scroll-tech/go-ethereum/params"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/utils"
@@ -38,7 +37,7 @@ type CoordinatorApp struct {
HTTPPort int64
args []string
docker.AppAPI
*cmd.Cmd
}
// NewCoordinatorApp returns a new coordinatorApp manager.
@@ -64,14 +63,14 @@ func NewCoordinatorApp(testApps *testcontainers.TestcontainerApps, configFile st
// RunApp runs the coordinator-test child process with the given parameters.
func (c *CoordinatorApp) RunApp(t *testing.T, args ...string) {
c.AppAPI = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
c.Cmd = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
c.Cmd.RunApp(func() bool { return c.Cmd.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
}
// Free stops and releases coordinator-test.
func (c *CoordinatorApp) Free() {
if !utils.IsNil(c.AppAPI) {
c.AppAPI.WaitExit()
if !utils.IsNil(c.Cmd) {
c.Cmd.WaitExit()
}
_ = os.Remove(c.coordinatorFile)
}

View File

@@ -5,6 +5,7 @@
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"fork_name": "bernoulli",
"mock_mode": true,
"params_path": "",
"assets_path": ""

View File

@@ -1,6 +1,6 @@
module scroll-tech/coordinator
go 1.20
go 1.21
require (
github.com/appleboy/gin-jwt/v2 v2.9.1

View File

@@ -59,6 +59,7 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
@@ -95,6 +96,7 @@ github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -117,11 +119,13 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
@@ -144,6 +148,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -153,6 +158,7 @@ github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
@@ -164,6 +170,7 @@ github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -197,6 +204,7 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0=
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=

View File

@@ -50,6 +50,7 @@ type Config struct {
// VerifierConfig load zk verifier config.
type VerifierConfig struct {
ForkName string `json:"fork_name"`
MockMode bool `json:"mock_mode"`
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`

View File

@@ -53,25 +53,44 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
return jwt.MapClaims{}
}
// recover the public key
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
},
Signature: v.Signature,
var publicKey string
var err error
if v.Message.HardForkName != "" {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
HardForkName: v.Message.HardForkName,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
} else {
authMsg := message.LegacyAuthMsg{
Identity: &message.LegacyIdentity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
}
publicKey, err := authMsg.PublicKey()
if err != nil {
return jwt.MapClaims{}
}
if v.Message.HardForkName == "" {
v.Message.HardForkName = "shanghai"
}
return jwt.MapClaims{
types.PublicKey: publicKey,
types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion,
types.HardForkName: v.Message.HardForkName,
}
}
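Older provers sign an Identity without HardForkName, so their public key has to be recovered over the legacy RLP layout; when the field is absent, the fork name defaults to "shanghai".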
@@ -89,5 +108,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
if proverVersion, ok := claims[types.ProverVersion]; ok {
c.Set(types.ProverVersion, proverVersion)
}
if hardForkName, ok := claims[types.HardForkName]; ok {
c.Set(types.HardForkName, hardForkName)
}
return nil
}

View File

@@ -2,6 +2,7 @@ package api
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -25,6 +26,8 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
panic("proof receiver new verifier failure")
}
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)
Auth = NewAuthController(db)
GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)

View File

@@ -6,6 +6,8 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -21,15 +23,21 @@ import (
// GetTaskController is the API controller for fetching prover tasks
type GetTaskController struct {
proverTasks map[message.ProofType]provertask.ProverTask
getTaskAccessCounter *prometheus.CounterVec
}
// NewGetTaskController creates a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVK, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVK, reg)
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg)
ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
getTaskAccessCounter: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_get_task_access_count",
Help: "Multi dimensions get task counter.",
}, []string{coordinatorType.LabelProverName, coordinatorType.LabelProverPublicKey, coordinatorType.LabelProverVersion}),
}
ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
@@ -38,6 +46,28 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
return ptc
}
func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return fmt.Errorf("get public key from context failed")
}
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return fmt.Errorf("get prover name from context failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return fmt.Errorf("get prover version from context failed")
}
ptc.getTaskAccessCounter.With(prometheus.Labels{
coordinatorType.LabelProverPublicKey: publicKey.(string),
coordinatorType.LabelProverName: proverName.(string),
coordinatorType.LabelProverVersion: proverVersion.(string),
}).Inc()
return nil
}
// GetTasks returns an assigned chunk/batch task
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
var getTaskParameter coordinatorType.GetTaskParameter
@@ -55,6 +85,10 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
return
}
if err := ptc.incGetTaskAccessCounter(ctx); err != nil {
log.Warn("get_task access counter inc failed", "error", err.Error())
}
result, err := proverTask.Assign(ctx, &getTaskParameter)
if err != nil {
nerr := fmt.Errorf("return prover task err:%w", err)

View File

@@ -30,8 +30,8 @@ func (c *Collector) cleanupChallenge() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopCleanChallengeChan:
log.Info("the coordinator cleanupChallenge run loop exit")
return
}
}

View File

@@ -23,7 +23,10 @@ type Collector struct {
db *gorm.DB
ctx context.Context
stopTimeoutChan chan struct{}
stopChunkTimeoutChan chan struct{}
stopBatchTimeoutChan chan struct{}
stopBatchAllChunkReadyChan chan struct{}
stopCleanChallengeChan chan struct{}
proverTaskOrm *orm.ProverTask
chunkOrm *orm.Chunk
@@ -40,14 +43,17 @@ type Collector struct {
// NewCollector creates a collector that runs cron jobs to collect data to send to provers
func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prometheus.Registerer) *Collector {
c := &Collector{
cfg: cfg,
db: db,
ctx: ctx,
stopTimeoutChan: make(chan struct{}),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
challenge: orm.NewChallenge(db),
cfg: cfg,
db: db,
ctx: ctx,
stopChunkTimeoutChan: make(chan struct{}),
stopBatchTimeoutChan: make(chan struct{}),
stopBatchAllChunkReadyChan: make(chan struct{}),
stopCleanChallengeChan: make(chan struct{}),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
challenge: orm.NewChallenge(db),
timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_timeout_checker_run_total",
@@ -83,7 +89,10 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
// Stop all the collector
func (c *Collector) Stop() {
c.stopTimeoutChan <- struct{}{}
c.stopChunkTimeoutChan <- struct{}{}
c.stopBatchTimeoutChan <- struct{}{}
c.stopBatchAllChunkReadyChan <- struct{}{}
c.stopCleanChallengeChan <- struct{}{}
}
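
Splitting the single stop channel into four matters because a send on a shared unbuffered channel wakes only one receiver. A hedged standalone illustration of the failure mode (go-ethereum's log package assumed):

stop := make(chan struct{})
for i := 0; i < 4; i++ {
	go func(id int) {
		<-stop // previously, all four cron loops blocked on the same channel
		log.Info("cron loop exit", "id", id)
	}(i)
}
stop <- struct{}{} // unblocks exactly one goroutine; the other three keep running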
// timeoutTask cron check the send task is timeout. if timeout reached, restore the
@@ -113,8 +122,8 @@ func (c *Collector) timeoutBatchProofTask() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopBatchTimeoutChan:
log.Info("the coordinator timeoutBatchProofTask run loop exit")
return
}
}
@@ -146,8 +155,8 @@ func (c *Collector) timeoutChunkProofTask() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopChunkTimeoutChan:
log.Info("the coordinator timeoutChunkProofTask run loop exit")
return
}
}
@@ -253,8 +262,8 @@ func (c *Collector) checkBatchAllChunkReady() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopBatchAllChunkReadyChan:
log.Info("the coordinator checkBatchAllChunkReady run loop exit")
return
}
}

View File

@@ -21,5 +21,5 @@ func NewLoginLogic(db *gorm.DB) *LoginLogic {
// InsertChallengeString inserts the challenge string and checks that it does not already exist
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.challengeOrm.InsertChallenge(ctx, challenge)
return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
}
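Passing ctx.Copy() here (and throughout the prover task code above) hands downstream code a detached copy of the request-scoped gin.Context; gin requires such a copy whenever the context may be used outside the handler's own goroutine or after the handler returns.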

View File

@@ -31,16 +31,17 @@ type BatchProverTask struct {
batchAttemptsExceedTotal prometheus.Counter
batchTaskGetTaskTotal *prometheus.CounterVec
batchTaskGetTaskProver *prometheus.CounterVec
}
// NewBatchProverTask creates a new batch prover task
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
vkMap: vkMap,
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -58,6 +59,7 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
Name: "coordinator_batch_get_task_total",
Help: "Total number of batch get task.",
}, []string{"fork_name"}),
batchTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "batch"),
}
return bp
}
@@ -69,9 +71,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
hardForkNumber, err := bp.getHardForkNumberByName(getTaskParameter.HardForkName)
hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
return nil, err
}
@@ -81,9 +83,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
var endChunkIndex uint64 = math.MaxInt64
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, fromBlockNum)
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", getTaskParameter.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
@@ -92,9 +94,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
startChunkIndex = startChunk.Index
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, toBlockNum)
if err != nil {
log.Error("failed to get fork end chunk index", "forkName", getTaskParameter.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
@@ -110,7 +112,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBatchTask *orm.Batch
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx, startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -119,7 +121,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why fetch again? To allow one task to be assigned to multiple provers, batches already in `ProvingTaskAssigned`
// status must also be handed out. A single `proving_status IN (1, 2)` predicate would not use the Postgres index, so the query is split in two.
if tmpBatchTask == nil {
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx, startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -131,7 +133,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx, tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
@@ -166,20 +168,25 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx, &proverTask)
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
bp.batchTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
bp.batchTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
}).Inc()
return taskMsg, nil
}
@@ -235,7 +242,7 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *orm.Batch) {
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx, batchTask.Hash); err != nil {
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err)
}
}

View File

@@ -29,15 +29,16 @@ type ChunkProverTask struct {
chunkAttemptsExceedTotal prometheus.Counter
chunkTaskGetTaskTotal *prometheus.CounterVec
chunkTaskGetTaskProver *prometheus.CounterVec
}
// NewChunkProverTask creates a new chunk prover task
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
vkMap: vkMap,
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -55,6 +56,7 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
Name: "coordinator_chunk_get_task_total",
Help: "Total number of chunk get task.",
}, []string{"fork_name"}),
chunkTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "chunk"),
}
return cp
}
@@ -66,9 +68,9 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
hardForkNumber, err := cp.getHardForkNumberByName(getTaskParameter.HardForkName)
hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
return nil, err
}
@@ -83,7 +85,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpChunkTask *orm.Chunk
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx, fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -92,7 +94,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why fetch again? To allow one task to be assigned to multiple provers, chunks already in `ProvingTaskAssigned`
// status must also be handed out. A single `proving_status IN (1, 2)` predicate would not use the Postgres index, so the query is split in two.
if tmpChunkTask == nil {
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx, fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -104,7 +106,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx, tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
@@ -138,20 +140,25 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx, &proverTask)
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
cp.chunkTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
}).Inc()
return taskMsg, nil
}
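The split-query pattern flagged in the comment above is worth isolating. A minimal sketch of the idea, under the assumption (taken from that comment) that status 1 is unassigned and 2 is assigned; the store interface and its method names are hypothetical stand-ins for GetAssignedChunk/GetUnassignedChunk:

package sketch

import "context"

// Chunk is a trimmed stand-in for orm.Chunk.
type Chunk struct{ Index uint64 }

// chunkStore exposes one query per proving status so each query can hit
// its index; the interface and names here are hypothetical.
type chunkStore interface {
	GetAssigned(ctx context.Context, from, to uint64) (*Chunk, error)   // proving_status = 2 (assumed)
	GetUnassigned(ctx context.Context, from, to uint64) (*Chunk, error) // proving_status = 1 (assumed)
}

// pickChunk first re-offers an already-assigned chunk (so one task can go
// to multiple provers), then falls back to an unassigned one. Two simple
// queries replace `proving_status in (1, 2)`, which would skip the index.
func pickChunk(ctx context.Context, s chunkStore, from, to uint64) (*Chunk, error) {
	chunk, err := s.GetAssigned(ctx, from, to)
	if err != nil || chunk != nil {
		return chunk, err
	}
	return s.GetUnassigned(ctx, from, to)
}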

View File

@@ -2,8 +2,12 @@ package provertask
import (
"fmt"
"sync"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/version"
@@ -13,11 +17,12 @@ import (
coordinatorType "scroll-tech/coordinator/internal/types"
)
// ErrCoordinatorInternalFailure coordinator internal db failure
var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ErrHardForkName indicates client request with the wrong hard fork name
var ErrHardForkName = fmt.Errorf("wrong hard fork name")
var (
// ErrCoordinatorInternalFailure coordinator internal db failure
ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ErrHardForkName indicates client request with the wrong hard fork name
ErrHardForkName = fmt.Errorf("wrong hard fork name")
)
// ProverTask is the interface of a collector that sends tasks to provers
type ProverTask interface {
@@ -28,8 +33,8 @@ type ProverTask interface {
type BaseProverTask struct {
cfg *config.Config
db *gorm.DB
vk string
vkMap map[string]string
nameForkMap map[string]uint64
forkHeights []uint64
@@ -44,6 +49,7 @@ type proverTaskContext struct {
PublicKey string
ProverName string
ProverVersion string
HardForkName string
}
// checkParameter checks whether the prover task parameters are valid
@@ -68,12 +74,24 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
}
ptc.ProverVersion = proverVersion.(string)
hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, fmt.Errorf("get hard fork name from context failed")
}
ptc.HardForkName = hardForkName.(string)
if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
}
vk, vkExist := b.vkMap[ptc.HardForkName]
if !vkExist {
return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
}
// if the prover has a different vk
if getTaskParameter.VK != b.vk {
if getTaskParameter.VK != vk {
log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
// if the prover reports a different prover version
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
@@ -82,7 +100,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx, publicKey.(string))
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s, proverVersion: %s", publicKey, err, proverName, proverVersion)
}
@@ -90,7 +108,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
}
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check if prover %s is assigned a task, err: %w", publicKey.(string), err)
}
@@ -115,3 +133,22 @@ func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error
return hardForkNumber, nil
}
var (
getTaskCounterInitOnce sync.Once
getTaskCounterVec *prometheus.CounterVec = nil
)
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
getTaskCounterInitOnce.Do(func() {
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_get_task_count",
Help: "Multi dimensions get task counter.",
}, []string{"task_type",
coordinatorType.LabelProverName,
coordinatorType.LabelProverPublicKey,
coordinatorType.LabelProverVersion})
})
return getTaskCounterVec.MustCurryWith(prometheus.Labels{"task_type": taskType})
}
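newGetTaskCounterVec above guards registration with sync.Once so that the chunk and batch prover tasks share one underlying vector, then curries the task_type label per caller. A self-contained sketch of the same pattern (label values in main are dummies):

package main

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	once sync.Once
	vec  *prometheus.CounterVec
)

// newTaskCounter registers the shared vector exactly once and returns a
// view with task_type pre-filled, mirroring newGetTaskCounterVec above.
func newTaskCounter(reg prometheus.Registerer, taskType string) *prometheus.CounterVec {
	once.Do(func() {
		vec = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
			Name: "coordinator_get_task_count",
			Help: "Multi dimensions get task counter.",
		}, []string{"task_type", "prover_name", "prover_pubkey", "prover_version"})
	})
	return vec.MustCurryWith(prometheus.Labels{"task_type": taskType})
}

func main() {
	chunkCounter := newTaskCounter(prometheus.DefaultRegisterer, "chunk")
	chunkCounter.With(prometheus.Labels{
		"prover_name":    "prover_test0",
		"prover_pubkey":  "0xabc",
		"prover_version": "v1.0.0",
	}).Inc()
}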

View File

@@ -134,18 +134,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if len(pv) == 0 {
return fmt.Errorf("get ProverVersion from context failed")
}
hardForkName := ctx.GetString(coordinatorType.HardForkName)
var proverTask *orm.ProverTask
var err error
if proofParameter.UUID != "" {
proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx, proofParameter.UUID, pk)
proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
} else {
// TODO: delete this logic once all provers have upgraded
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx, proofMsg.Type, proofMsg.ID, pk, pv)
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx.Copy(), proofMsg.Type, proofMsg.ID, pk, pv)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
@@ -156,29 +157,28 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)
if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
if err = m.validator(ctx.Copy(), proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
return err
}
m.verifierTotal.WithLabelValues(pv).Inc()
var success bool
success := true
var verifyErr error
if proofMsg.Type == message.ProofTypeChunk {
success, verifyErr = m.verifier.VerifyChunkProof(proofMsg.ChunkProof)
} else if proofMsg.Type == message.ProofTypeBatch {
success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof)
// only verify batch proofs; the chunk proof verifier has been disabled since Bernoulli (see the sketch after this file)
if proofMsg.Type == message.ProofTypeBatch {
success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName)
}
if verifyErr != nil || !success {
m.verifierFailureTotal.WithLabelValues(pv).Inc()
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
"prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
if verifyErr != nil {
return ErrValidatorFailureVerifiedFailed
@@ -189,12 +189,12 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())
log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)
if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
if err := m.closeProofTask(ctx.Copy(), proverTask, proofMsg, proofTimeSec); err != nil {
m.proofSubmitFailure.Inc()
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
return ErrCoordinatorInternalFailure
}
@@ -221,7 +221,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
return nil
}
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) {
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) {
defer func() {
if err != nil {
m.validateFailureTotal.Inc()
@@ -240,7 +240,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
"cannot submit valid proof for a prover task twice",
"taskType", proverTask.TaskType, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", proverTask.ProverPublicKey,
"proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName,
)
return ErrValidatorFailureProverTaskCannotSubmitTwice
}
@@ -259,7 +259,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
log.Info("proof generated by prover failed",
"taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
"failureMessage", failureMsg)
"failureMessage", failureMsg, "forkName", forkName)
return ErrValidatorFailureProofMsgStatusNotOk
}
@@ -267,13 +267,13 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName)
return ErrValidatorFailureProofTimeout
}
// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
}
@@ -281,7 +281,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
m.validateFailureProverTaskHaveVerifier.Inc()
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil
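Note the `success := true` default introduced above: because chunk verification is disabled after Bernoulli, a chunk proof now passes through unless it is a batch proof that fails verification. A condensed sketch of that dispatch (the types are simplified stand-ins, not the actual message types):

package sketch

// batchVerifier is a trimmed stand-in for the verifier used above.
type batchVerifier interface {
	VerifyBatchProof(proof []byte, forkName string) (bool, error)
}

// verifyProof mirrors the dispatch above: only batch proofs are verified;
// everything else (i.e. chunk proofs) keeps the success=true default.
func verifyProof(v batchVerifier, isBatch bool, proof []byte, forkName string) (bool, error) {
	success := true // chunk proof verification disabled since Bernoulli
	var err error
	if isBatch {
		success, err = v.VerifyBatchProof(proof, forkName)
	}
	return success, err
}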

View File

@@ -9,8 +9,26 @@ import (
)
// NewVerifier sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
return &Verifier{}, nil
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
batchVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
chunkVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}
// VerifyChunkProof returns a mock verification result for a ChunkProof.
@@ -22,7 +40,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
}
// VerifyBatchProof returns a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}

View File

@@ -9,7 +9,7 @@ const InvalidTestProof = "this is a invalid proof"
// Verifier represents a Rust FFI to a halo2 verifier.
type Verifier struct {
cfg *config.VerifierConfig
BatchVK string
ChunkVK string
cfg *config.VerifierConfig
ChunkVKMap map[string]string
BatchVKMap map[string]string
}

View File

@@ -11,9 +11,11 @@ package verifier
import "C" //nolint:typecheck
import (
"embed"
"encoding/base64"
"encoding/json"
"io"
"io/fs"
"os"
"path"
"unsafe"
@@ -28,7 +30,26 @@ import (
// NewVerifier sets up a Rust FFI to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode {
return &Verifier{cfg: cfg}, nil
batchVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
chunkVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}
paramsPathStr := C.CString(cfg.ParamsPath)
assetsPathStr := C.CString(cfg.AssetsPath)
@@ -40,25 +61,31 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
C.init_batch_verifier(paramsPathStr, assetsPathStr)
C.init_chunk_verifier(paramsPathStr, assetsPathStr)
batchVK, err := readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
v := &Verifier{
cfg: cfg,
ChunkVKMap: make(map[string]string),
BatchVKMap: make(map[string]string),
}
batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
if err != nil {
return nil, err
}
chunkVK, err := readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
if err != nil {
return nil, err
}
v.BatchVKMap[cfg.ForkName] = batchVK
v.ChunkVKMap[cfg.ForkName] = chunkVK
return &Verifier{
cfg: cfg,
BatchVK: batchVK,
ChunkVK: chunkVK,
}, nil
if err := v.loadEmbedVK(); err != nil {
return nil, err
}
return v, nil
}
// VerifyBatchProof verifies a ZkProof by marshaling it and sending it to the Halo2 verifier.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, batch verifier disabled")
if string(proof.Proof) == InvalidTestProof {
@@ -72,13 +99,15 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
return false, err
}
log.Info("Start to verify batch proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
log.Info("Start to verify batch proof ...")
verified := C.verify_batch_proof(proofStr)
verified := C.verify_batch_proof(proofStr, forkNameStr)
return verified != 0, nil
}
@@ -107,7 +136,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
return verified != 0, nil
}
func readVK(filePat string) (string, error) {
func (v *Verifier) readVK(filePat string) (string, error) {
f, err := os.Open(filePat)
if err != nil {
return "", err
@@ -118,3 +147,26 @@ func readVK(filePat string) (string, error) {
}
return base64.StdEncoding.EncodeToString(byt), nil
}
//go:embed legacy_vk/*
var legacyVKFS embed.FS
func (v *Verifier) loadEmbedVK() error {
batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
if err != nil {
log.Error("load embed batch vk failure", "err", err)
return err
}
chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
if err != nil {
log.Error("load embed chunk vk failure", "err", err)
return err
}
v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
return nil
}
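loadEmbedVK registers the embedded legacy keys both under "shanghai" and under the empty fork name, so provers that log in without a hard_fork_name still resolve to a VK. A sketch of the coordinator-side lookup this enables, mirroring the vkMap check in checkParameter earlier in this diff:

package main

import "fmt"

// expectedVK resolves the VK a prover must match for its fork name. The ""
// key (populated by loadEmbedVK above) covers legacy provers that log in
// without hard_fork_name. Sketch only.
func expectedVK(vkMap map[string]string, forkName string) (string, error) {
	vk, ok := vkMap[forkName]
	if !ok {
		return "", fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", forkName, vkMap)
	}
	return vk, nil
}

func main() {
	vkMap := map[string]string{"shanghai": "legacyVKBase64", "": "legacyVKBase64"}
	fmt.Println(expectedVK(vkMap, "")) // a legacy prover resolves via the "" entry
}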

View File

@@ -14,7 +14,6 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/verifier"
)
var (
@@ -34,7 +33,7 @@ func TestFFI(t *testing.T) {
AssetsPath: *assetsPath,
}
v, err := verifier.NewVerifier(cfg)
v, err := NewVerifier(cfg)
as.NoError(err)
chunkProof1 := readChunkProof(*chunkProofPath1, as)
@@ -50,7 +49,7 @@ func TestFFI(t *testing.T) {
t.Log("Verified chunk proof 2")
batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof)
batchOk, err := v.VerifyBatchProof(batchProof, "bernoulli")
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")

View File

@@ -24,6 +24,7 @@ type Batch struct {
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -54,6 +55,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// blob
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -248,6 +253,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
newBatch := Batch{
Index: batch.Index,
Hash: daBatch.Hash().Hex(),
DataHash: daBatch.DataHash.Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: endDAChunkHash.Hex(),
@@ -262,6 +268,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
ActiveAttempts: 0,
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
BlobDataProof: nil, // mock value: this code path is only used in unit tests
BlobSize: 0, // mock value: this code path is only used in unit tests
}
db := o.db

View File

@@ -48,6 +48,10 @@ type Chunk struct {
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// blob
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -300,6 +304,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
ProvingStatus: int16(types.ProvingTaskUnassigned),
TotalAttempts: 0,
ActiveAttempts: 0,
CrcMax: 0, // mock value: this code path is only used in unit tests
BlobSize: 0, // mock value: this code path is only used in unit tests
}
db := o.db

View File

@@ -9,6 +9,8 @@ const (
ProverName = "prover_name"
// ProverVersion the prover version for context
ProverVersion = "prover_version"
// HardForkName the fork name for context
HardForkName = "hard_fork_name"
)
// Message the login message struct
@@ -16,6 +18,7 @@ type Message struct {
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
}
// LoginParameter for /login api

View File

@@ -2,7 +2,6 @@ package types
// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
VK string `form:"vk" json:"vk"`

View File

@@ -0,0 +1,10 @@
package types
var (
// LabelProverName label name for prover name; a common label used in prometheus metrics, the same applies to the labels below.
LabelProverName = "prover_name"
// LabelProverPublicKey label name for prover public key
LabelProverPublicKey = "prover_pubkey"
// LabelProverVersion label name for prover version
LabelProverVersion = "prover_version"
)

View File

@@ -22,7 +22,6 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/common/testcontainers"
tc "scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/message"
@@ -97,8 +96,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
ChainID: 111,
},
ProverManager: &config.ProverManager{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{MockMode: true},
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{
MockMode: true,
},
BatchCollectionTimeSec: 10,
ChunkCollectionTimeSec: 10,
MaxVerifierWorkers: 10,
@@ -114,6 +115,8 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
var chainConf params.ChainConfig
for forkName, forkNumber := range nameForkMap {
switch forkName {
case "shanghai":
chainConf.ShanghaiBlock = big.NewInt(forkNumber)
case "bernoulli":
chainConf.BernoulliBlock = big.NewInt(forkNumber)
case "london":
@@ -156,7 +159,7 @@ func setEnv(t *testing.T) {
glogger.Verbosity(log.LvlInfo)
log.Root().SetHandler(glogger)
testApps = tc.NewTestcontainerApps()
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
db, err = testApps.GetGormDBClient()
@@ -259,12 +262,12 @@ func testGetTaskBlocked(t *testing.T) {
assert.NoError(t, err)
expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
expectedErr = fmt.Errorf("get empty prover task")
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
@@ -275,12 +278,12 @@ func testGetTaskBlocked(t *testing.T) {
assert.NoError(t, err)
expectedErr = fmt.Errorf("get empty prover task")
code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}
@@ -300,12 +303,12 @@ func testOutdatedProverVersion(t *testing.T) {
assert.True(t, chunkProver.healthCheckSuccess(t))
expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}
@@ -359,7 +362,7 @@ func testHardForkAssignTask(t *testing.T) {
{
name: "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
exceptTaskNumber: 0,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
@@ -449,7 +452,7 @@ func testHardForkAssignTask(t *testing.T) {
{ // hard fork 3, prover1:2 prover2:3 block [2-3]
name: "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -458,7 +461,7 @@ func testHardForkAssignTask(t *testing.T) {
{
name: "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -467,7 +470,7 @@ func testHardForkAssignTask(t *testing.T) {
{ // hard fork 2, prover 2 block [2-3]
name: "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
@@ -535,7 +538,7 @@ func testHardForkAssignTask(t *testing.T) {
continue
}
getTaskNumber++
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success)
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i])
}
assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
})
@@ -578,7 +581,7 @@ func testValidProof(t *testing.T) {
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, proofStatus, types.Success)
provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul")
}
// verify proof status
@@ -644,34 +647,21 @@ func testInvalidProof(t *testing.T) {
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// create mock provers.
provers := make([]*mockProver, 2)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
}
proofType := message.ProofTypeBatch
provingStatus := verifiedFailed
expectErrCode := types.ErrCoordinatorHandleZkProofFailure
prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul")
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul")
// verify proof status
var (
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute)
)
var (
chunkProofStatus types.ProvingStatus
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute)
batchProofStatus types.ProvingStatus
chunkActiveAttempts int16
chunkMaxAttempts int16
batchActiveAttempts int16
batchMaxAttempts int16
)
@@ -679,24 +669,17 @@ func testInvalidProof(t *testing.T) {
for {
select {
case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
if batchProofStatus == types.ProvingTaskAssigned {
return
}
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(batchMaxAttempts))
assert.Equal(t, 0, int(batchActiveAttempts))
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
t.Error("failed to check proof status", "batchProofStatus", batchProofStatus.String())
return
}
}
@@ -736,7 +719,7 @@ func testProofGeneratedFailed(t *testing.T) {
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure)
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul")
}
// verify proof status
@@ -859,14 +842,14 @@ func testTimeoutProof(t *testing.T) {
assert.NotNil(t, proverChunkTask2)
assert.Equal(t, chunkTask2ErrCode, types.Success)
assert.Equal(t, chunkTask2ErrMsg, "")
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success)
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul")
batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version)
proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul")
assert.NotNil(t, proverBatchTask2)
assert.Equal(t, batchTask2ErrCode, types.Success)
assert.Equal(t, batchTask2ErrMsg, "")
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success)
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul")
// verify proof status, it should be verified now, because second prover sent valid proof
chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)

View File

@@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
}
// connectToCoordinator logs in to the prover manager (challenge, then login).
func (r *mockProver) connectToCoordinator(t *testing.T) string {
func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string {
challengeString := r.challenge(t)
return r.login(t, challengeString)
return r.login(t, challengeString, forkName)
}
func (r *mockProver) challenge(t *testing.T) string {
@@ -76,18 +76,32 @@ func (r *mockProver) challenge(t *testing.T) string {
return loginData.Token
}
func (r *mockProver) login(t *testing.T, challengeString string) string {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
},
func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string {
var body string
if forkName != "" {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
HardForkName: forkName,
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
} else {
authMsg := message.LegacyAuthMsg{
Identity: &message.LegacyIdentity{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
var result ctypes.Response
client := resty.New()
@@ -137,7 +151,7 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) {
// get task from coordinator
token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)
type response struct {
@@ -151,7 +165,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "hard_fork_name": forkName}).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -160,9 +174,11 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
}
// Testing expected errors returned by coordinator.
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
//
//nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) {
// get task from coordinator
token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)
type response struct {
@@ -185,7 +201,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
return result.ErrCode, result.ErrMsg
}
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) {
proofMsgStatus := message.StatusOk
if proofStatus == generatedFailed {
proofMsgStatus = message.StatusProofError
@@ -228,7 +244,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
submitProof.Proof = string(encodeData)
}
token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)
submitProofData, err := json.Marshal(submitProof)

View File

@@ -1,93 +1,89 @@
package migrate
import (
"database/sql"
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database"
"scroll-tech/common/testcontainers"
)
var (
base *docker.App
pgDB *sqlx.DB
testApps *testcontainers.TestcontainerApps
pgDB *sql.DB
)
func initEnv(t *testing.T) error {
func setupEnv(t *testing.T) {
// Start db container.
base.RunDBImage(t)
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
gormClient, err := testApps.GetGormDBClient()
assert.NoError(t, err)
pgDB, err = gormClient.DB()
assert.NoError(t, err)
}
// Create db orm handler.
factory, err := database.NewOrmFactory(base.DBConfig)
if err != nil {
return err
}
pgDB = factory.GetDB()
return nil
func TestMain(m *testing.M) {
defer func() {
if testApps != nil {
testApps.Free()
}
}()
m.Run()
}
func TestMigrate(t *testing.T) {
base = docker.NewDockerApp()
if err := initEnv(t); err != nil {
t.Fatal(err)
}
setupEnv(t)
t.Run("testCurrent", testCurrent)
t.Run("testStatus", testStatus)
t.Run("testResetDB", testResetDB)
t.Run("testMigrate", testMigrate)
t.Run("testRollback", testRollback)
t.Cleanup(func() {
base.Free()
})
}
func testCurrent(t *testing.T) {
cur, err := Current(pgDB.DB)
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(0), cur)
}
func testStatus(t *testing.T) {
status := Status(pgDB.DB)
status := Status(pgDB)
assert.NoError(t, status)
}
func testResetDB(t *testing.T) {
assert.NoError(t, ResetDB(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, ResetDB(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, int64(16), cur)
assert.Equal(t, int64(20), cur)
}
func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(16), cur)
assert.Equal(t, int64(20), cur)
}
func testRollback(t *testing.T) {
version, err := Current(pgDB.DB)
version, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(16), version)
assert.Equal(t, int64(20), version)
assert.NoError(t, Rollback(pgDB.DB, nil))
assert.NoError(t, Rollback(pgDB, nil))
cur, err := Current(pgDB.DB)
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, version, cur+1)
targetVersion := int64(0)
assert.NoError(t, Rollback(pgDB.DB, &targetVersion))
assert.NoError(t, Rollback(pgDB, &targetVersion))
cur, err = Current(pgDB.DB)
cur, err = Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(0), cur)
}

View File

@@ -0,0 +1,27 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE chunk
ADD COLUMN crc_max INTEGER DEFAULT 0,
ADD COLUMN blob_size INTEGER DEFAULT 0;
ALTER TABLE batch
ADD COLUMN data_hash VARCHAR DEFAULT '',
ADD COLUMN blob_data_proof BYTEA DEFAULT NULL,
ADD COLUMN blob_size INTEGER DEFAULT 0;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS batch
DROP COLUMN data_hash,
DROP COLUMN blob_data_proof,
DROP COLUMN blob_size;
ALTER TABLE IF EXISTS chunk
DROP COLUMN crc_max,
DROP COLUMN blob_size;
-- +goose StatementEnd

View File

@@ -0,0 +1,18 @@
-- +goose Up
-- +goose StatementBegin
create index if not exists idx_prover_task_created_at on prover_task(created_at) where deleted_at IS NULL;
create index if not exists idx_prover_task_task_id on prover_task(task_id) where deleted_at IS NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop index if exists idx_prover_task_created_at;
drop index if exists idx_prover_task_task_id;
-- +goose StatementEnd

View File

@@ -0,0 +1,15 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE l1_block
ADD COLUMN blob_base_fee BIGINT DEFAULT 0;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS l1_block
DROP COLUMN blob_base_fee;
-- +goose StatementEnd

View File

@@ -0,0 +1,17 @@
-- +goose Up
-- +goose StatementBegin
DROP INDEX if exists idx_prover_block_list_on_public_key;
CREATE UNIQUE INDEX if not exists uniq_prover_block_list_on_public_key ON prover_block_list(public_key) where deleted_at IS NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
CREATE INDEX if not exists idx_prover_block_list_on_public_key ON prover_block_list(public_key);
DROP INDEX if exists uniq_prover_block_list_on_public_key;
-- +goose StatementEnd

View File

@@ -21,14 +21,15 @@ import (
type CoordinatorClient struct {
client *resty.Client
proverName string
priv *ecdsa.PrivateKey
proverName string
hardForkName string
priv *ecdsa.PrivateKey
mu sync.Mutex
}
// NewCoordinatorClient constructs a new CoordinatorClient.
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, hardForkName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
client := resty.New().
SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second).
SetRetryCount(cfg.RetryCount).
@@ -50,9 +51,10 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
"retry wait time (second)", cfg.RetryWaitTimeSec)
return &CoordinatorClient{
client: client,
proverName: proverName,
priv: priv,
client: client,
proverName: proverName,
hardForkName: hardForkName,
priv: priv,
}, nil
}
@@ -83,6 +85,7 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
ProverVersion: version.Version,
ProverName: c.proverName,
Challenge: challengeResult.Data.Token,
HardForkName: c.hardForkName,
},
}
@@ -97,10 +100,12 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
Challenge string `json:"challenge"`
ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
}{
Challenge: authMsg.Identity.Challenge,
ProverName: authMsg.Identity.ProverName,
ProverVersion: authMsg.Identity.ProverVersion,
HardForkName: authMsg.Identity.HardForkName,
},
Signature: authMsg.Signature,
}

View File

@@ -25,6 +25,7 @@ type LoginRequest struct {
Challenge string `json:"challenge"`
ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
} `json:"message"`
Signature string `json:"signature"`
}
@@ -41,7 +42,6 @@ type LoginResponse struct {
// GetTaskRequest defines the request structure for GetTask API
type GetTaskRequest struct {
HardForkName string `json:"hard_fork_name"`
TaskType message.ProofType `json:"task_type"`
ProverHeight uint64 `json:"prover_height,omitempty"`
VK string `json:"vk"`

View File

@@ -12,7 +12,6 @@ import (
"scroll-tech/prover/config"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
@@ -40,7 +39,7 @@ type ProverApp struct {
index int
name string
args []string
docker.AppAPI
*cmd.Cmd
}
// NewProverApp return a new proverApp manager.
@@ -65,7 +64,7 @@ func NewProverApp(testApps *testcontainers.TestcontainerApps, mockName utils.Moc
name: name,
args: []string{"--log.debug", "--config", proverFile},
}
proverApp.AppAPI = cmd.NewCmd(proverApp.name, proverApp.args...)
proverApp.Cmd = cmd.NewCmd(proverApp.name, proverApp.args...)
if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
panic(err)
}
@@ -74,13 +73,13 @@ func NewProverApp(testApps *testcontainers.TestcontainerApps, mockName utils.Moc
// RunApp runs the prover-test child process with multiple parameters.
func (r *ProverApp) RunApp(t *testing.T) {
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, "prover start successfully") })
r.Cmd.RunApp(func() bool { return r.Cmd.WaitResult(t, time.Second*40, "prover start successfully") })
}
// Free stops and releases prover-test.
func (r *ProverApp) Free() {
if !utils.IsNil(r.AppAPI) {
r.AppAPI.WaitExit()
if !utils.IsNil(r.Cmd) {
r.Cmd.WaitExit()
}
_ = os.Remove(r.proverFile)
_ = os.Remove(r.Config.KeystorePath)

View File

@@ -82,7 +82,7 @@ func NewProver(ctx context.Context, cfg *config.Config) (*Prover, error) {
}
log.Info("init prover_core successfully!")
coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, priv)
coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, cfg.HardForkName, priv)
if err != nil {
return nil, err
}
@@ -178,8 +178,7 @@ func (r *Prover) proveAndSubmit() error {
func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
// prepare the request
req := &client.GetTaskRequest{
HardForkName: r.cfg.HardForkName,
TaskType: r.Type(),
TaskType: r.Type(),
// we may not be able to get the vk the first time, so we pass the vk to the coordinator on every getTask
// instead of only passing it at login
VK: r.proverCore.VK,
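Per the comment above, the VK may not be available at login, so it now travels with every get_task request; hard_fork_name is gone from the request body entirely. A sketch of what such a request looks like on the wire, using only the fields of GetTaskRequest shown earlier (the endpoint path is taken from the tests above; the use of net/http here is illustrative, the prover itself goes through its resty client):

package sketch

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// getTaskRequest mirrors GetTaskRequest above: no hard_fork_name; the vk is
// sent on every request rather than once at login.
type getTaskRequest struct {
	TaskType     int    `json:"task_type"`
	ProverHeight uint64 `json:"prover_height,omitempty"`
	VK           string `json:"vk"`
}

// fetchTask posts a get_task request carrying the prover's current vk.
func fetchTask(coordinatorURL, token, vk string, taskType int) (*http.Response, error) {
	body, err := json.Marshal(getTaskRequest{TaskType: taskType, ProverHeight: 100, VK: vk})
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, coordinatorURL+"/coordinator/v1/get_task", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
	return http.DefaultClient.Do(req)
}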

View File

@@ -24,7 +24,7 @@ rollup_relayer: ## Builds the rollup_relayer bin
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
lint: ## Lint the files - used for CI
lint: mock_abi ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
clean: ## Empty out the bin folder

View File

@@ -76,7 +76,7 @@ func action(ctx *cli.Context) error {
}
})
log.Info("Start event-watcher successfully")
log.Info("Start event-watcher successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

View File

@@ -78,9 +78,15 @@ func action(ctx *cli.Context) error {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := utils.ReadGenesis(genesisPath)
if err != nil {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, genesis.Config, relayer.ServiceTypeL1GasOracle, registry)
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
}
@@ -109,7 +115,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions
log.Info("Start gas-oracle successfully")
log.Info("Start gas-oracle successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

View File

@@ -7,12 +7,11 @@ import (
"testing"
"time"
"scroll-tech/rollup/internal/config"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/utils"
"scroll-tech/rollup/internal/config"
)
// MockApp mockApp-test client manager.
@@ -20,7 +19,7 @@ type MockApp struct {
Config *config.Config
testApps *testcontainers.TestcontainerApps
mockApps map[utils.MockAppName]docker.AppAPI
mockApps map[utils.MockAppName]*cmd.Cmd
originFile string
rollupFile string
@@ -33,7 +32,7 @@ func NewRollupApp(testApps *testcontainers.TestcontainerApps, file string) *Mock
rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", testApps.Timestamp)
rollupApp := &MockApp{
testApps: testApps,
mockApps: make(map[utils.MockAppName]docker.AppAPI),
mockApps: make(map[utils.MockAppName]*cmd.Cmd),
originFile: file,
rollupFile: rollupFile,
args: []string{"--log.debug", "--config", rollupFile},
@@ -69,7 +68,7 @@ func (b *MockApp) WaitExit() {
for _, app := range b.mockApps {
app.WaitExit()
}
b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
b.mockApps = make(map[utils.MockAppName]*cmd.Cmd)
}
// Free stop and release rollup mocked apps.

View File

@@ -115,7 +115,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")
log.Info("Start rollup-relayer successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

View File

@@ -19,7 +19,9 @@
},
"gas_oracle_config": {
"min_gas_price": 0,
"gas_price_diff": 50000
"gas_price_diff": 50000,
"l1_base_fee_weight": 0.132,
"l1_blob_base_fee_weight": 0.145
},
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
}
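The two new weights indicate the gas oracle now blends the L1 base fee with the blob base fee when computing the price it relays (see the blob_base_fee column added to l1_block earlier in this diff). The exact formula lives in the gas-oracle change (#1305) and is not shown here; the sketch below is only one plausible reading of how such weights could combine the two fees:

package main

import "fmt"

// weightedGasPrice is an illustrative guess, not the actual #1305 formula:
// a linear blend of the two L1 fees using the config weights above.
func weightedGasPrice(l1BaseFee, l1BlobBaseFee uint64) uint64 {
	const (
		l1BaseFeeWeight     = 0.132
		l1BlobBaseFeeWeight = 0.145
	)
	return uint64(l1BaseFeeWeight*float64(l1BaseFee) + l1BlobBaseFeeWeight*float64(l1BlobBaseFee))
}

func main() {
	// e.g. 30 gwei L1 base fee and 1 gwei blob base fee
	fmt.Println(weightedGasPrice(30_000_000_000, 1_000_000_000))
}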

Some files were not shown because too many files have changed in this diff.