Compare commits

31 Commits

Author SHA1 Message Date
Péter Garamvölgyi
9262e9af69 Reapply "build(ci): make all intermediate images compatible with multi platforms (#1291)"
This reverts commit 5090b77655.
2024-04-28 11:25:10 +08:00
Péter Garamvölgyi
5090b77655 Revert "build(ci): make all intermediate images compatible with multi platforms (#1291)"
This reverts commit 8f8f6eb1a1.
2024-04-28 10:54:17 +08:00
colin
4cafc9349a feat(gas-oracle): tweak gas price update logic (#1305) 2024-04-28 10:47:04 +08:00
colin
ca6f856372 docs: remove local testing image Dockerfile and docs for mac M1/M2 silicon (#1300) 2024-04-25 08:32:57 +08:00
Mengran Lan
72ee087f35 fix(db): change prover_block_list's index on public_key from non-unique to unique (#1294)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2024-04-24 11:00:09 +08:00
sbaizet
8f8f6eb1a1 build(ci): make all intermediate images compatible with multi platforms (#1291) 2024-04-23 17:20:03 +08:00
Péter Garamvölgyi
c56bda9f47 feat(gas-oracle): relay blob base fee after Bernoulli block (#1293) 2024-04-23 16:53:39 +08:00
Péter Garamvölgyi
433d5c2f52 feat: publish rollup-db-cli image (#1280)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-22 20:19:15 +08:00
georgehao
69c0f7ed75 feat: upgrade gomonkey version (#1290) 2024-04-22 16:03:41 +08:00
colin
c25b827666 docs: add bridge-history deployment in readme (#1281)
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-04-22 14:38:39 +08:00
georgehao
da4f6818e3 feat: upgrade scroll to go1.21 (#1285)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2024-04-22 14:35:50 +08:00
georgehao
200ca7c15b fix(coordinator): fix coordinator cron exit issue (#1286)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-22 09:26:53 +08:00
Mengran Lan
53cf26597d perf(db): add indexes for table prover_task, columns respectively: task_id, created_at (#1283) 2024-04-19 16:54:13 +08:00
sbaizet
f0f7341271 ci - Docker support for arm64 on all services (#1278) 2024-04-16 10:06:29 +02:00
sbaizet
b6af88c936 ci - Docker support for arm64 (#1276) 2024-04-15 16:18:25 +02:00
sbaizet
de541a650a ci - fix github action to support arm64 platform (#1275) 2024-04-15 15:35:18 +02:00
colin
d7a57235d3 fix(rollup-relayer): tweak logs (#1274)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-15 14:07:26 +08:00
sbaizet
91d21301ec ci: add support for arm64 on event-watcher images (#1269) 2024-04-14 16:27:59 +08:00
Daniel Helm
4b32a44a70 docs(coordinator): fix internal links to config files in README (#1272) 2024-04-14 16:27:17 +08:00
georgehao
55b400c5fb fix: rollup make lint failure (#1273) 2024-04-14 16:26:35 +08:00
JayLiu
1b49091207 test: fix testcontainers listen ports (#1270)
Co-authored-by: liuyuecai <liuyuecai1995@gmail.com>
2024-04-13 13:30:22 +08:00
HAOYUatHZ
5b827c3c18 feat(db): add batch_data_hash & blob metadata (#1221)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-12 15:45:06 +08:00
georgehao
6b2eb80aa5 feat: print version info on service startup (#1268)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-12 15:09:26 +08:00
Péter Garamvölgyi
71f88b04f5 fix: add blobHash to challenge and piHash (#1264)
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2024-04-12 11:00:37 +08:00
Zhang Zhuo
bcd9764bcd chore(libzkp): upgrade to v0.10.3 (#1267)
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2024-04-12 10:51:39 +08:00
georgehao
b4f8377a08 fix(coordinator): fix coordinator recover public key inconsistent (#1265)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2024-04-12 07:19:31 +08:00
Zhang Zhuo
b52d43caa8 chore(libzkp): upgrade to v0.10.2 (#1257)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2024-04-11 13:44:09 +08:00
Mengran Lan
201bf401cd feat(coordinator): add new metric get_task_counter tracking prover info (#1235)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-11 12:47:03 +08:00
georgehao
898ac1d25c feat: update batch/chunk proving status when finalize without proof (#1255)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-04-11 11:15:09 +08:00
Snoppy
1336b89fb8 chore: fix typos (#1244)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-04-11 11:02:32 +08:00
Zhang Zhuo
73045df037 feat(coordinator): support multiple batch verifier versions (#1249)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-11 10:55:58 +08:00
101 changed files with 1345 additions and 516 deletions

View File

@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go

View File

@@ -33,13 +33,13 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
- name: Lint
@@ -54,7 +54,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install goimports
@@ -95,7 +95,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc

View File

@@ -46,6 +46,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -90,6 +91,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -134,6 +136,52 @@ jobs:
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
rollup-db-cli:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: rollup-db-cli
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: rollup-db-cli
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/db_cli.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -178,6 +226,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -222,6 +271,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -266,6 +316,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -310,6 +361,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-cron.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}

View File

@@ -7,12 +7,12 @@ on:
description: 'Go version'
required: true
type: string
default: '1.20'
default: '1.21'
RUST_VERSION:
description: 'Rust toolchain version'
required: true
type: string
default: 'nightly-2022-12-10'
default: 'nightly-2023-12-03'
PYTHON_VERSION:
description: 'Python version'
required: false
@@ -29,31 +29,191 @@ defaults:
working-directory: 'build/dockerfiles/intermediate'
jobs:
build-and-push:
build-and-publish-cuda-go-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build
run: |
make all
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Publish
run: |
make publish
env:
GO_VERSION: ${{ inputs.GO_VERSION }}
RUST_VERSION: ${{ inputs.RUST_VERSION }}
PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/cuda-go-rust-builder.Dockerfile
tags: scrolltech/cuda-go-rust-builder:cuda-${{ github.event.inputs.CUDA_VERSION }}-go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-alpine-builder:${{ github.event.inputs.GO_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-alpine-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-alpine-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-py-runner:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/py-runner.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/py-runner:${{ github.event.inputs.PYTHON_VERSION }}
build-args: |
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}

View File

@@ -50,7 +50,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2022-12-10
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go

View File

@@ -47,11 +47,5 @@ dev_docker: ## build docker images for development/testing usages
docker build -t scroll_l1geth ./common/docker/l1geth/
docker build -t scroll_l2geth ./common/docker/l2geth/
build_test_docker: ## build Docker image for local testing on M1/M2 Silicon Mac
docker build -t scroll_test_image -f ./build/dockerfiles/local_testing.Dockerfile $$(mktemp -d)
run_test_docker: ## run Docker image for local testing on M1/M2 Silicon Mac
docker run -it --rm --name scroll_test_container --network=host -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):/go/src/app -e HOST_PATH=$(PWD) scroll_test_image
clean: ## Empty out the bin folder
@rm -rf build/bin

View File

@@ -43,8 +43,6 @@ make dev_docker
## Testing Rollup & Coordinator
### For Non-Apple Silicon (M1/M2) Macs
Run the tests using the following commands:
```bash
@@ -54,39 +52,6 @@ go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
### For Apple Silicon (M1/M2) Macs
To run tests on Apple Silicon Macs, build and execute the Docker image as outlined below:
#### Build a Docker Image for Testing
Use the following command to build a Docker image:
```bash
make build_test_docker
```
This command builds a Docker image named `scroll_test_image` using the Dockerfile found at `./build/dockerfiles/local_testing.Dockerfile`.
#### Run Docker Image
After the image is built, run a Docker container from it:
```bash
make run_test_docker
```
This command runs a Docker container named `scroll_test_container` from the `scroll_test_image` image. The container uses the host network and has access to the Docker socket and the current directory.
Once the Docker container is running, execute the tests using the following commands:
```bash
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
```
## Testing Contracts
You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).

View File

@@ -79,3 +79,50 @@ provides REST APIs. Please refer to the API details below.
// @Success 200
// @Router /api/txsbyhashes [post]
```
## Running bridge-history-api locally
1. Pull the latest Redis image:
```
docker pull redis:latest
```
2. Run the Redis container:
```
docker run --name bridgehistoryapi-redis -d -p 6379:6379 redis:latest
```
3. Pull the latest PostgreSQL image:
```
docker pull postgres:latest
```
4. Run the PostgreSQL container:
```
docker run --name bridgehistoryapi-history-db -p 5444:5432 -e POSTGRES_PASSWORD=123456 -e POSTGRES_DB=test -d postgres
```
5. Run database migrations to initialize the tables:
```
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli migrate
```
6. Run bridgehistoryapi-fetcher:
```
make bridgehistoryapi-fetcher
./build/bin/bridgehistoryapi-fetcher
```
7. Run bridgehistoryapi-api:
```
make bridgehistoryapi-api
./build/bin/bridgehistoryapi-api
```
The endpoints provided in [./conf/config.json](./conf/config.json) are public and rate-limited.
For production usage:
- For L1 endpoints, utilizing a service provider's free tier should suffice.
- For L2 endpoints, consider [running a Scroll L2geth node](https://docs.scroll.xyz/en/developers/guides/running-a-scroll-node) and using the exposed HTTP port.
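A minimal sketch of exercising the service once both binaries are running. The `/api/txsbyhashes` route comes from the Swagger annotation above; the listen port and the `txs` request field are assumptions, so check the handler and conf/config.json for the actual values:
```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical request body: the "txs" field name is an assumption.
	body := []byte(`{"txs": ["0x0000000000000000000000000000000000000000000000000000000000000000"]}`)
	// Port 8080 is an assumption; the API's listen address is set in conf/config.json.
	resp, err := http.Post("http://localhost:8080/api/txsbyhashes", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```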

View File

@@ -15,6 +15,7 @@
"USDCGatewayAddr": "0xf1AF3b23DE0A5Ca3CAb7261cb0061C0D779A5c7B",
"LIDOGatewayAddr": "0x6625C6332c9F91F2D27c304E729B86db87A3f504",
"DAIGatewayAddr": "0x67260A8B73C5B77B55c1805218A42A7A6F98F515",
"PufferGatewayAddr": "0xA033Ff09f2da45f0e9ae495f525363722Df42b2a",
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B"
@@ -34,7 +35,9 @@
"USDCGatewayAddr": "0x33B60d5Dd260d453cAC3782b0bDC01ce84672142",
"LIDOGatewayAddr": "0x8aE8f22226B9d789A36AC81474e633f8bE2856c9",
"DAIGatewayAddr": "0xaC78dff3A87b5b534e366A93E785a0ce8fA6Cc62",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79"
"PufferGatewayAddr": "0x9eBf2f33526CD571f8b2ad312492cb650870CFd6",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79",
"MessageQueueAddr": "0x5300000000000000000000000000000000000000"
},
"db": {
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY ./bridge-history-api/go.* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM golang:1.20-alpine3.16 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as chef
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
WORKDIR app
FROM chef as planner
@@ -17,7 +17,7 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -1,8 +1,8 @@
ifeq ($(GO_VERSION),)
GO_VERSION=1.20
GO_VERSION=1.21
endif
ifeq ($(RUST_VERSION),)
RUST_VERSION=nightly-2022-12-10
RUST_VERSION=nightly-2023-12-03
endif
ifeq ($(PYTHON_VERSION),)
PYTHON_VERSION=3.10

View File

@@ -1,6 +1,6 @@
ARG CUDA_VERSION=11.7.1
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04

View File

@@ -1,4 +1,4 @@
ARG GO_VERSION=1.20
ARG GO_VERSION=1.21
FROM golang:${GO_VERSION}-alpine

View File

@@ -1,5 +1,5 @@
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM golang:${GO_VERSION}-alpine

View File

@@ -1,5 +1,5 @@
ARG GO_VERSION=1.20
ARG RUST_VERSION=nightly-2022-12-10
ARG GO_VERSION=1.21
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM ubuntu:20.04

View File

@@ -1,5 +1,5 @@
ARG ALPINE_VERSION=3.15
ARG RUST_VERSION=nightly-2022-12-10
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM alpine:${ALPINE_VERSION}

View File

@@ -1,4 +1,4 @@
ARG RUST_VERSION=nightly-2022-12-10
ARG RUST_VERSION=nightly-2023-12-03
ARG CARGO_CHEF_TAG=0.1.41
FROM ubuntu:20.04

View File

@@ -1,11 +0,0 @@
# Start from the latest golang base image
FROM golang:1.21
# Install Docker
RUN apt-get update && apt-get install -y docker.io docker-compose
# Set the working directory
WORKDIR /go/src/app
# This container will be executable
ENTRYPOINT [ "/bin/bash" ]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.20 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./

View File

@@ -31,7 +31,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"ark-std 0.3.0",
"c-kzg",
@@ -521,7 +521,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"eth-types",
"ethers-core",
@@ -535,7 +535,7 @@ dependencies = [
"mock",
"mpt-zktrie",
"num",
"poseidon-circuit",
"poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
"rand",
"revm-precompile",
"serde",
@@ -1139,7 +1139,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"base64 0.13.1",
"ethers-core",
@@ -1150,7 +1150,7 @@ dependencies = [
"itertools 0.11.0",
"num",
"num-bigint",
"poseidon-circuit",
"poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
"regex",
"serde",
"serde_json",
@@ -1293,7 +1293,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"eth-types",
"geth-utils",
@@ -1485,7 +1485,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"eth-types",
"halo2_proofs",
@@ -1507,7 +1507,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"env_logger 0.10.0",
"gobuild",
@@ -1684,7 +1684,7 @@ dependencies = [
"log",
"num-bigint",
"num-traits",
"poseidon-circuit",
"poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=scroll-dev-1201)",
"rand",
"rand_chacha",
"serde",
@@ -2142,7 +2142,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"env_logger 0.10.0",
"eth-types",
@@ -2292,7 +2292,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"eth-types",
"ethers-core",
@@ -2307,7 +2307,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"eth-types",
"halo2-mpt-circuits",
@@ -2315,7 +2315,7 @@ dependencies = [
"hex",
"log",
"num-bigint",
"poseidon-circuit",
"poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
"zktrie",
]
@@ -2671,6 +2671,21 @@ dependencies = [
"subtle",
]
[[package]]
name = "poseidon-circuit"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#babf5f6a69bec40b2e6523df317c073dcd0b1f97"
dependencies = [
"bitvec",
"ff 0.13.0",
"halo2_proofs",
"lazy_static",
"log",
"rand",
"rand_xorshift",
"thiserror",
]
[[package]]
name = "poseidon-circuit"
version = "0.1.0"
@@ -2754,7 +2769,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"aggregator",
"anyhow",
@@ -4441,7 +4456,7 @@ dependencies = [
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
"array-init",
"bus-mapping",
@@ -4465,7 +4480,7 @@ dependencies = [
"mpt-zktrie",
"num",
"num-bigint",
"poseidon-circuit",
"poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
"rand",
"rand_chacha",
"rand_xorshift",
@@ -4494,6 +4509,7 @@ dependencies = [
"serde",
"serde_derive",
"serde_json",
"snark-verifier-sdk",
]
[[package]]

View File

@@ -24,7 +24,8 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i
[dependencies]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.0rc3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
base64 = "0.13.0"
env_logger = "0.9.0"

View File

@@ -12,6 +12,7 @@ use prover::{
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, BlockTrace, ChunkHash, ChunkProof,
};
use snark_verifier_sdk::verify_evm_calldata;
use std::{cell::OnceCell, env, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -148,11 +149,33 @@ pub unsafe extern "C" fn gen_batch_proof(
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char {
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
let fork_name_str = c_char_to_str(fork_name);
let fork_id = match fork_name_str {
"" => 0,
"shanghai" => 0,
"bernoulli" => 1,
_ => {
log::warn!("unexpected fork_name {fork_name_str}, treated as bernoulli");
1
}
};
let verified = panic_catch(|| {
if fork_id == 0 {
// before upgrade#2(EIP4844)
verify_evm_calldata(
include_bytes!("evm_verifier_fork_1.bin").to_vec(),
proof.calldata(),
)
} else {
VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
}
});
verified.unwrap_or(false) as c_char
}

Binary file not shown.

View File

@@ -3,7 +3,7 @@ void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char* check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof);
char verify_batch_proof(char* proof, char* fork_name);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
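The header change means every Go caller must now thread a fork name through the FFI. A minimal cgo sketch, assuming libzkp and this header are on the include/linker paths (the real call site lives in the coordinator's verifier package; names here are illustrative):
```go
package verifier

/*
#cgo LDFLAGS: -lzkp
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"
import "unsafe"

// verifyBatchProof forwards a JSON-encoded batch proof to libzkp.
// Per the Rust side above, "" and "shanghai" select the pre-EIP-4844
// verifier; "bernoulli" (or any other value) selects the current one.
func verifyBatchProof(proofJSON, forkName string) bool {
	cProof := C.CString(proofJSON)
	cFork := C.CString(forkName)
	defer C.free(unsafe.Pointer(cProof))
	defer C.free(unsafe.Pointer(cFork))
	return C.verify_batch_proof(cProof, cFork) != 0
}
```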

View File

@@ -61,8 +61,11 @@ func (t *TestcontainerApps) StartL1GethContainer() error {
req := testcontainers.ContainerRequest{
Image: "scroll_l1geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
Cmd: []string{"--log.debug", "ANY"},
WaitingFor: wait.ForAll(
wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
),
Cmd: []string{"--log.debug", "ANY"},
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
@@ -85,7 +88,10 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
req := testcontainers.ContainerRequest{
Image: "scroll_l2geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
WaitingFor: wait.ForAll(
wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
),
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,

View File

@@ -227,18 +227,11 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
}
// blob payload
blob, z, err := constructBlobPayload(batch.Chunks)
blob, blobVersionedHash, z, err := constructBlobPayload(batch.Chunks)
if err != nil {
return nil, err
}
// blob versioned hash
c, err := kzg4844.BlobToCommitment(*blob)
if err != nil {
return nil, fmt.Errorf("failed to create blob commitment")
}
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
daBatch := DABatch{
Version: CodecV1Version,
BatchIndex: batch.Index,
@@ -281,7 +274,7 @@ func computeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore u
}
// constructBlobPayload constructs the 4844 blob payload.
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) {
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
metadataLength := 2 + MaxNumChunks*4
@@ -289,8 +282,8 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
blobBytes := make([]byte, metadataLength)
// challenge digest preimage
// 1 hash for metadata and 1 for each chunk
challengePreimage := make([]byte, (1+MaxNumChunks)*32)
// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
challengePreimage := make([]byte, (1+MaxNumChunks+1)*32)
// the chunk data hash used for calculating the challenge preimage
var chunkDataHash common.Hash
@@ -309,7 +302,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
// encode L2 txs into blob payload
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
if err != nil {
return nil, nil, err
return nil, common.Hash{}, nil, err
}
blobBytes = append(blobBytes, rlpTxData...)
}
@@ -341,9 +334,19 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
// convert raw data to BLSFieldElements
blob, err := makeBlobCanonical(blobBytes)
if err != nil {
return nil, nil, err
return nil, common.Hash{}, nil, err
}
// compute blob versioned hash
c, err := kzg4844.BlobToCommitment(*blob)
if err != nil {
return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
}
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
// challenge: append blob versioned hash
copy(challengePreimage[(1+MaxNumChunks)*32:], blobVersionedHash[:])
// compute z = challenge_digest % BLS_MODULUS
challengeDigest := crypto.Keccak256Hash(challengePreimage)
pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
@@ -354,7 +357,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
start := 32 - len(pointBytes)
copy(z[start:], pointBytes)
return blob, &z, nil
return blob, blobVersionedHash, &z, nil
}
// makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
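For reference, the tail of the new challenge computation can be reproduced in isolation: hash the extended preimage (metadata hash, one hash per chunk, plus the blob versioned hash), reduce modulo the BLS12-381 scalar field, and right-align the result into a 32-byte point. A standalone sketch assuming MaxNumChunks = 15, matching the 15-chunk test cases below:
```go
package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/crypto"
)

// blsModulus is the BLS12-381 scalar field modulus used by EIP-4844.
var blsModulus, _ = new(big.Int).SetString(
	"52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)

// challengePoint computes z = keccak256(preimage) mod BLS_MODULUS,
// right-aligned into 32 bytes, mirroring constructBlobPayload above.
func challengePoint(challengePreimage []byte) [32]byte {
	digest := crypto.Keccak256Hash(challengePreimage)
	point := new(big.Int).Mod(new(big.Int).SetBytes(digest[:]), blsModulus)
	var z [32]byte
	b := point.Bytes()
	copy(z[32-len(b):], b)
	return z
}

func main() {
	// 1 metadata hash + 15 chunk hashes + 1 blob versioned hash, 32 bytes each.
	var preimage [(1 + 15 + 1) * 32]byte
	fmt.Printf("%x\n", challengePoint(preimage[:]))
}
```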

View File

@@ -479,55 +479,55 @@ func TestCodecV1BatchChallenge(t *testing.T) {
originalBatch := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch, err := NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b36", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef", hex.EncodeToString(batch.z[:]))
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c99294", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e58", hex.EncodeToString(batch.z[:]))
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8", hex.EncodeToString(batch.z[:]))
trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk5}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk6}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk7}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
// 15 chunks
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b", hex.EncodeToString(batch.z[:]))
chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk8, chunk9}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c43", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab", hex.EncodeToString(batch.z[:]))
}
func repeat(element byte, count int) string {
@@ -547,31 +547,31 @@ func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
expectedy string
}{
// single empty chunk
{chunks: [][]string{{}}, expectedz: "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", expectedy: "28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df51"},
{chunks: [][]string{{}}, expectedz: "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", expectedy: "304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd08"},
// single non-empty chunk
{chunks: [][]string{{"0x010203"}}, expectedz: "30a9d6cfc2b87fb00d80e7fea28ebb9eff0bd526dbf1da32acfe8c5fd49632ff", expectedy: "723515444cb320fe437b9cea3b51293f5fbcb5913739ad35eab28b1863f7c312"},
{chunks: [][]string{{"0x010203"}}, expectedz: "1c1d4bd5153f877d799853080aba243f2c186dd6d6064eaefacfe715c92b6354", expectedy: "24e80ed99526b0d15ba46f7ec682f517576ddae68d5131e5d351f8bae06ea7d3"},
// multiple empty chunks
{chunks: [][]string{{}, {}}, expectedz: "17772348f946a4e4adfcaf5c1690d078933b6b090ca9a52fab6c7e545b1007ae", expectedy: "05ba9abbc81a1c97f4cdaa683a7e0c731d9dfd88feef8f7b2fcfd79e593662b5"},
{chunks: [][]string{{}, {}}, expectedz: "152c9ccfcc2884f9891f7adce2de110cf9f85bfd0e21f0933ae0636390a84d41", expectedy: "5f6f532676e25b49e2eae77513fbeca173a300b434c0a5e24fa554b68e27d582"},
// multiple non-empty chunks
{chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "60376321eea0886c29bd97d95851c7b5fbdb064c8adfdadd7678617b32b3ebf2", expectedy: "50cfbcece01cadb4eade40649e17b140b31f96088097e38f020e31dfe6551604"},
{chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "62100f5381179ea7db7aa8fdedb0f7fc7b82730b75432d50ab41f80aeebe45a3", expectedy: "5b1f6e7a54907ddc06871853cf1f5d53bf2de0df7b61d0df84bc2c3fb80320cd"},
// empty chunk followed by non-empty chunk
{chunks: [][]string{{}, {"0x010203"}}, expectedz: "054539f03564eda9462d582703cde0788e4e27c311582ddfb19835358273a7ca", expectedy: "1fba03580b5908c4c66b48e79c10e7a34e4b27ed37a1a049b3e17e017cad5245"},
{chunks: [][]string{{}, {"0x010203"}}, expectedz: "2d94d241c4a2a8d8f02845ca40cfba344f3b42384af2045a75c82e725a184232", expectedy: "302416c177e9e7fe40c3bc4315066c117e27d246b0a33ef68cdda6dd333c485c"},
// non-empty chunk followed by empty chunk
{chunks: [][]string{{"0x070809"}, {}}, expectedz: "0b82dceaa6ca4b5d704590c921accfd991b56b5ad0212e6a4e63e54915a2053b", expectedy: "2362f3a0c87f0ea11eb898ed608c7f09a42926a058d4c5d111a0f54cad10ebbd"},
{chunks: [][]string{{"0x070809"}, {}}, expectedz: "7227567e3b1dbacb48a32bb85e4e99f73e4bd5620ea8cd4f5ac00a364c86af9c", expectedy: "2eb3dfd28362f35f562f779e749a555d2f1f87ddc716e95f04133d25189a391c"},
// max number of chunks all empty
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "174cd3ba9b2ae8ab789ec0b5b8e0b27ee122256ec1756c383dbf2b5b96903f1b", expectedy: "225cab9658904181671eb7abc342ffc36a6836048b64a67f0fb758439da2567b"},
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "1128ac3e22ced6af85be4335e0d03a266946a7cade8047e7fc59d6c8be642321", expectedy: "2d9b16422ce17f328fd00c99349768f0cb0c8648115eb3bd9b7864617ba88059"},
// max number of chunks all non-empty
{chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1e93e961cdfb4bd26a5be48f23af4f1aa8c6bebe57a089d3250f8afb1e988bf8", expectedy: "24ed4791a70b28a6bad21c22d58f82a5ea5f9f9d2bcfc07428b494e9ae93de6e"},
{chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1a4025a3d74e70b511007dd55a2e252478c48054c6383285e8a176f33d99853b", expectedy: "12071ac2571c11220432a27b8be549392892e9baf4c654748ca206def3843940"},
// single chunk blob full
{chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "61405cb0b114dfb4d611be84bedba0fcd2e55615e193e424f1cc7b1af0df3d31", expectedy: "58609bbca10e50489b630ecb5b9347378579ed784d6a10749fd505055d35c3c0"},
{chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "72714cc4a0ca75cee2d543b1f958e3d3dd59ac7df0d9d5617d8117b65295a5f2", expectedy: "4ebb690362bcbc42321309c210c99f2ebdb53b3fcf7cf3b17b78f6cfd1203ed3"},
// multiple chunks blob full
{chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "22533c3ea99536b4b83a89835aa91e6f0d2fc3866c201e18d7ca4b3af92fad61", expectedy: "40d4b71492e1a06ee3c273ef9003c7cb05aed021208871e13fa33302fa0f4dcc"},
{chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "70eb5b4db503e59413238eef451871c5d12f2bb96c8b96ceca012f4ca0114727", expectedy: "568d0aaf280ec83f9c81ed2d80ecbdf199bd72dafb8a350007d37ea82997e455"},
// max number of chunks only last one non-empty not full blob
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "0e6525c0dd261e8f62342b1139062bb23bc2b8b460163364598fb29e82a4eed5", expectedy: "1db984d6deb5e84bc67d0755aa2da8fe687233147603b4ecba94d0c8463c3836"},
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "03db68ae16ee88489d52db19e6111b25630c5f23ad7cd14530aacf0cd231d476", expectedy: "24527d0b0e93b3dec0060c7b128975a8088b3104d3a297dc807ab43862a77a1a"},
// max number of chunks only last one non-empty full blob
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "3a638eac98f22f817b84e3d81ccaa3de080f83dc80a5823a3f19320ef3cb6fc8", expectedy: "73ab100278822144e2ed8c9d986e92f7a2662fd18a51bdf96ec55848578b227a"},
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "677670193f73db499cede572bcb55677f0d2f13d690f9a820bd00bf584c3c241", expectedy: "1d85677f172dbdf4ad3094a17deeb1df4d7d2b7f35ecea44aebffa757811a268"},
// max number of chunks but last is empty
{chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "02ef442d99f450559647a7823f1be0e148c75481cc5c703c02a116e8ac531fa8", expectedy: "31743538cfc3ac43d1378a5c497ebc9462c20b4cb4470e0e7a9f7342ea948333"},
{chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "22935042dfe7df771b02c1f5cababfe508869e8f6339dabe25a8a32e37728bb0", expectedy: "48ca66fb5a094401728c3a6a517ffbd72c4d4d9a8c907e2d2f1320812f4d856f"},
} {
chunks := []*encoding.Chunk{}
@@ -587,7 +587,7 @@ func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
chunks = append(chunks, chunk)
}
b, z, err := constructBlobPayload(chunks)
b, _, z, err := constructBlobPayload(chunks)
assert.NoError(t, err)
actualZ := hex.EncodeToString(z[:])
assert.Equal(t, tc.expectedz, actualZ)
@@ -608,7 +608,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err := batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b363d27683f7aab53cf071e2c8c8f3abfe750d206c048489450d120679cdc823f7db44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607a30dad96431f70551dd950c1426131d73ccea6d050d38dea123aad90aa8c0b734c98e8e04bd8ea8f19b415f2d85156d8", hex.EncodeToString(verifyData))
assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef423dc493f1dd7c9fbecdffa021ca4649b13e8d72231487034ec6b27e155ecfd7b44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607b38542ec811c92d86ff6f3319de06ee60c42655278ccf874f3615f450de730895276828b73db03c553b0bc7e5474a5e0", hex.EncodeToString(verifyData))
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
@@ -617,7 +617,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c992946dc7e51a42a31f429bc1f321dcf020b9a661225259522dba186fcfe5dc012191b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e86a0b3c76e33edb24eb07faeaa5d3f2b15a55df6ab99abf828b5803f5681dc634602eb7469ee0556563b2eccebf16ec822", hex.EncodeToString(verifyData))
assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e5821975f318babe50be728f9b52754d5ce2caa2ba82ba35b5888af1c5f28d23206b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e868e0c6fe7bd39baa5ee6339cd334a18af7c680d24e825262499e83b31633b13a9ee89813fae8441630c82bc9dce3f1e07", hex.EncodeToString(verifyData))
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
@@ -626,7 +626,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a24164e6ea8b7946ce5e40d2baa4f6aa0d030076f6074295288133c00e75dafa2afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d676248f5ca4a9f0d9b7fa48f5f649dc84e928161fd99ad1bd9a9879b05d29c5f718bfb3b0a696a5f3ed50b5b8c6a9d530b3ee", hex.EncodeToString(verifyData))
assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8088a01e54e3565d2e91ce6afbadf479330847d9106737875303ce17f17c48722afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d67624aee03a0f7cdb7807bc7e0b9fb20bc299af2a35e38cda816708b40f2f18db491e14a0f5d9cfe2f4c12e4ca1a219484f17", hex.EncodeToString(verifyData))
trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -635,7 +635,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
@@ -644,7 +644,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
@@ -653,7 +653,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
// 15 chunks
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
@@ -661,7 +661,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2613ca15d051a539e3b239027f9bdbd03bd3c66c98afafb674e2a7441912cbe099743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc93291fbc65cfa558e4df12bcde442483d31072000c56f94fe012285bc5832eaee5fe1d47f1e8655539c4500f66207d8edc6", hex.EncodeToString(verifyData))
assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b54d28f1479467d8b97fb99f5257d3e5d63a81cb2d60e3564fe6ec6066a311c119743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc932a0b603cc94be2007d4b3b17af06b4fb30caf0e574d5abcfc5654079e65154679afad75844396082a7200a4e82462aeed", hex.EncodeToString(verifyData))
chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -670,7 +670,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c4330b3863672052b3d6c6552d121b0b13f97659f49bbfb6d7fed6e4b7076e4a43383bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fdae9cb71d402cfe8bc4d659f228c41f0b9d195c5074278a2346204cfaa336f5de2244a3d53e0effa2f49c81924720e84e", hex.EncodeToString(verifyData))
assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab35b73ddb4a78fc4a8540f1d8259512c46e606a701e7ef7742e38cc4562ef53b983bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fda28e5610ca6b185d6ac30b53bd83d6366fccb1956daafa90ff6b504a966b119ebb45cb3f7085b7c1d622ee1ad27fcff9", hex.EncodeToString(verifyData))
}
func TestCodecV1BatchSkipBitmap(t *testing.T) {

View File

@@ -0,0 +1,91 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
// HardForkName the hard fork name
HardForkName string `json:"hard_fork_name"`
}
// SignWithKey signs the auth message with the given private key and stores the resulting signature in the AuthMsg.
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the signature of the auth message.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey recovers the public key from the signature.
func (a *AuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
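
For orientation, a minimal hypothetical sketch of the signing round trip this file implements, using the same go-ethereum calls (crypto.Sign, crypto.SigToPub, crypto.VerifySignature). The Identity mirror is declared locally because this diff does not show the package's import path:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// Local mirror of Identity, for illustration only.
type Identity struct {
	ProverName    string
	ProverVersion string
	Challenge     string
	HardForkName  string
}

func main() {
	priv, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}

	// Hash the identity content, as Identity.Hash does: RLP-encode, then Keccak256.
	byt, _ := rlp.EncodeToBytes(&Identity{"prover-1", "v4.4.2", "challenge-token", "bernoulli"})
	hash := crypto.Keccak256Hash(byt).Bytes()

	// Sign, as AuthMsg.SignWithKey does.
	sig, err := crypto.Sign(hash, priv)
	if err != nil {
		panic(err)
	}
	fmt.Println("signature:", hexutil.Encode(sig))

	// Recover and verify, as AuthMsg.Verify does.
	pk, _ := crypto.SigToPub(hash, sig)
	ok := crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1])
	fmt.Println("verified:", ok)
}
```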

View File

@@ -0,0 +1,89 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type LegacyAuthMsg struct {
// Message fields
Identity *LegacyIdentity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// LegacyIdentity contains all the fields to be signed by the prover.
type LegacyIdentity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
// SignWithKey signs the auth message with the given private key and stores the resulting signature in the LegacyAuthMsg.
func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the signature of the auth message.
func (a *LegacyAuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey recovers the public key from the signature.
func (a *LegacyAuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *LegacyIdentity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}

View File

@@ -58,26 +58,6 @@ const (
ProofTypeBatch
)
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
// GenerateToken generates token
func GenerateToken() (string, error) {
b := make([]byte, 16)
@@ -87,65 +67,6 @@ func GenerateToken() (string, error) {
return hex.EncodeToString(b), nil
}
// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey return public key from signature
func (a *AuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
*ProofDetail `json:"zkProof"`

View File

@@ -54,7 +54,7 @@ func TestIdentityHash(t *testing.T) {
hash, err := identity.Hash()
assert.NoError(t, err)
expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721"
expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
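
The expected hash changes because Identity now RLP-encodes a fourth field (HardForkName) before hashing. A small sketch with locally mirrored structs and illustrative values (not the test's actual inputs) makes the effect visible:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// Local mirrors of the two identity shapes, for illustration only.
type LegacyIdentity struct {
	ProverName, ProverVersion, Challenge string
}

type Identity struct {
	ProverName, ProverVersion, Challenge, HardForkName string
}

func main() {
	legacy, _ := rlp.EncodeToBytes(&LegacyIdentity{"p", "v", "c"})
	current, _ := rlp.EncodeToBytes(&Identity{"p", "v", "c", "bernoulli"})

	// Different RLP payloads, therefore different Keccak digests and signatures.
	fmt.Println(crypto.Keccak256Hash(legacy).Hex())
	fmt.Println(crypto.Keccak256Hash(current).Hex())
}
```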

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.3.85"
var tag = "v4.4.2"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -5,7 +5,7 @@
"license": "MIT",
"scripts": {
"test:hardhat": "npx hardhat test",
"test:forge": "forge test -vvv",
"test:forge": "forge test -vvv --evm-version cancun",
"test": "yarn test:hardhat && yarn test:forge",
"solhint": "./node_modules/.bin/solhint -f table 'src/**/*.sol'",
"lint:sol": "./node_modules/.bin/prettier --write 'src/**/*.sol'",

View File

@@ -37,8 +37,7 @@ contract DeployL1BridgeContracts is Script {
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
address L1_PLONK_VERIFIER_0_ADDR = vm.envAddress("L1_PLONK_VERIFIER_0_ADDR");
address L1_PLONK_VERIFIER_1_ADDR = vm.envAddress("L1_PLONK_VERIFIER_1_ADDR");
address L1_PLONK_VERIFIER_ADDR = vm.envAddress("L1_PLONK_VERIFIER_ADDR");
address L1_PROXY_ADMIN_ADDR = vm.envAddress("L1_PROXY_ADMIN_ADDR");
@@ -56,9 +55,7 @@ contract DeployL1BridgeContracts is Script {
address L2_SCROLL_STANDARD_ERC20_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_ADDR");
address L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR");
// TODO: refactor ZkEvmVerifierV1 into an array?
ZkEvmVerifierV1 zkEvmVerifierV1_0;
ZkEvmVerifierV1 zkEvmVerifierV1_1;
ZkEvmVerifierV1 zkEvmVerifierV1;
MultipleVersionRollupVerifier rollupVerifier;
EnforcedTxGateway enforcedTxGateway;
ProxyAdmin proxyAdmin;
@@ -69,7 +66,7 @@ contract DeployL1BridgeContracts is Script {
vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);
deployZkEvmVerifierV1s();
deployZkEvmVerifierV1();
deployMultipleVersionRollupVerifier();
deployL1Whitelist();
deployEnforcedTxGateway();
@@ -88,23 +85,17 @@ contract DeployL1BridgeContracts is Script {
vm.stopBroadcast();
}
// TODO: refactor
function deployZkEvmVerifierV1s() internal {
zkEvmVerifierV1_0 = new ZkEvmVerifierV1(L1_PLONK_VERIFIER_0_ADDR);
zkEvmVerifierV1_1 = new ZkEvmVerifierV1(L1_PLONK_VERIFIER_1_ADDR);
function deployZkEvmVerifierV1() internal {
zkEvmVerifierV1 = new ZkEvmVerifierV1(L1_PLONK_VERIFIER_ADDR);
logAddress("L1_ZKEVM_VERIFIER_V1_0_ADDR", address(zkEvmVerifierV1_0));
logAddress("L1_ZKEVM_VERIFIER_V1_1_ADDR", address(zkEvmVerifierV1_1));
logAddress("L1_ZKEVM_VERIFIER_V1_ADDR", address(zkEvmVerifierV1));
}
// TODO: refactor
function deployMultipleVersionRollupVerifier() internal {
uint256[] memory _versions = new uint256[](2);
address[] memory _verifiers = new address[](2);
uint256[] memory _versions = new uint256[](1);
address[] memory _verifiers = new address[](1);
_versions[0] = 0;
_verifiers[0] = address(zkEvmVerifierV1_0);
_versions[1] = 1;
_verifiers[1] = address(zkEvmVerifierV1_1);
_verifiers[0] = address(zkEvmVerifierV1);
rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);
logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));

View File

@@ -477,7 +477,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
_postStateRoot,
_withdrawRoot,
_dataHash,
_blobDataProof[0:64]
_blobDataProof[0:64],
_blobVersionedHash
)
);

View File

@@ -199,7 +199,7 @@ library PatriciaMerkleTrieVerifier {
}
// decodes all RLP encoded data and stores their DATA items
// [length - 128 bits | calldata offset - 128 bits] in a continous memory region.
// [length - 128 bits | calldata offset - 128 bits] in a continuous memory region.
// Expects that the RLP starts with a list that defines the length
// of the whole RLP region.
function decodeFlat(_ptr) -> ptr, memStart, nItems, hash {
@@ -505,7 +505,7 @@ library PatriciaMerkleTrieVerifier {
}
// the one and only boundary check
// in case an attacker crafted a malicous payload
// in case an attacker crafted a malicious payload
// and succeeds in the prior verification steps
// then this should catch any bogus accesses
if iszero(eq(ptr, add(proof.offset, proof.length))) {

View File

@@ -83,6 +83,8 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
}
function testTransferUSDCRoles(address owner) external {
hevm.assume(owner != address(0));
// call from a non-whitelisted caller should revert
hevm.expectRevert("only circle caller");
gateway.transferUSDCRoles(owner);

View File

@@ -38,7 +38,7 @@ make lint
## Configure
The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManager` in [`config/config.go`](config/config.go) for more details.
The coordinator behavior can be configured using [`conf/config.json`](conf/config.json). Check the code comments under `ProverManager` in [`internal/config/config.go`](internal/config/config.go) for more details.
## Start

View File

@@ -5,6 +5,7 @@
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"fork_name": "bernoulli",
"mock_mode": true,
"params_path": "",
"assets_path": ""

View File

@@ -1,6 +1,6 @@
module scroll-tech/coordinator
go 1.20
go 1.21
require (
github.com/appleboy/gin-jwt/v2 v2.9.1

View File

@@ -59,6 +59,7 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
@@ -95,6 +96,7 @@ github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -117,11 +119,13 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
@@ -144,6 +148,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -153,6 +158,7 @@ github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
@@ -164,6 +170,7 @@ github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -197,6 +204,7 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0=
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=

View File

@@ -50,6 +50,7 @@ type Config struct {
// VerifierConfig load zk verifier config.
type VerifierConfig struct {
ForkName string `json:"fork_name"`
MockMode bool `json:"mock_mode"`
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`

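For reference, a self-contained sketch of deserializing the extended verifier section. The struct is mirrored locally so the example compiles on its own; the field tags match the diff above and the JSON matches the config shown earlier in this compare:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of VerifierConfig, for illustration only.
type VerifierConfig struct {
	ForkName   string `json:"fork_name"`
	MockMode   bool   `json:"mock_mode"`
	ParamsPath string `json:"params_path"`
	AssetsPath string `json:"assets_path"`
}

func main() {
	raw := []byte(`{"fork_name":"bernoulli","mock_mode":true,"params_path":"","assets_path":""}`)
	var cfg VerifierConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {ForkName:bernoulli MockMode:true ParamsPath: AssetsPath:}
}
```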
View File

@@ -53,25 +53,44 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
return jwt.MapClaims{}
}
// recover the public key
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
},
Signature: v.Signature,
var publicKey string
var err error
if v.Message.HardForkName != "" {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
HardForkName: v.Message.HardForkName,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
} else {
authMsg := message.LegacyAuthMsg{
Identity: &message.LegacyIdentity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
}
publicKey, err := authMsg.PublicKey()
if err != nil {
return jwt.MapClaims{}
}
if v.Message.HardForkName == "" {
v.Message.HardForkName = "shanghai"
}
return jwt.MapClaims{
types.PublicKey: publicKey,
types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion,
types.HardForkName: v.Message.HardForkName,
}
}
@@ -89,5 +108,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
if proverVersion, ok := claims[types.ProverVersion]; ok {
c.Set(types.ProverVersion, proverVersion)
}
if hardForkName, ok := claims[types.HardForkName]; ok {
c.Set(types.HardForkName, hardForkName)
}
return nil
}

View File

@@ -2,6 +2,7 @@ package api
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -25,6 +26,8 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
panic("proof receiver new verifier failure")
}
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)
Auth = NewAuthController(db)
GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)

View File

@@ -6,6 +6,8 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -21,15 +23,21 @@ import (
// GetTaskController is the API controller for fetching prover tasks
type GetTaskController struct {
proverTasks map[message.ProofType]provertask.ProverTask
getTaskAccessCounter *prometheus.CounterVec
}
// NewGetTaskController creates a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVK, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVK, reg)
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg)
ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
getTaskAccessCounter: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_get_task_access_count",
Help: "Multi dimensions get task counter.",
}, []string{coordinatorType.LabelProverName, coordinatorType.LabelProverPublicKey, coordinatorType.LabelProverVersion}),
}
ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
@@ -38,6 +46,28 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
return ptc
}
func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return fmt.Errorf("get public key from context failed")
}
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return fmt.Errorf("get prover name from context failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return fmt.Errorf("get prover version from context failed")
}
ptc.getTaskAccessCounter.With(prometheus.Labels{
coordinatorType.LabelProverPublicKey: publicKey.(string),
coordinatorType.LabelProverName: proverName.(string),
coordinatorType.LabelProverVersion: proverVersion.(string),
}).Inc()
return nil
}
// GetTasks returns an assigned chunk/batch task
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
var getTaskParameter coordinatorType.GetTaskParameter
@@ -55,6 +85,10 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
return
}
if err := ptc.incGetTaskAccessCounter(ctx); err != nil {
log.Warn("get_task access counter inc failed", "error", err.Error())
}
result, err := proverTask.Assign(ctx, &getTaskParameter)
if err != nil {
nerr := fmt.Errorf("return prover task err:%w", err)

View File

@@ -30,8 +30,8 @@ func (c *Collector) cleanupChallenge() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopCleanChallengeChan:
log.Info("the coordinator cleanupChallenge run loop exit")
return
}
}

View File

@@ -23,7 +23,10 @@ type Collector struct {
db *gorm.DB
ctx context.Context
stopTimeoutChan chan struct{}
stopChunkTimeoutChan chan struct{}
stopBatchTimeoutChan chan struct{}
stopBatchAllChunkReadyChan chan struct{}
stopCleanChallengeChan chan struct{}
proverTaskOrm *orm.ProverTask
chunkOrm *orm.Chunk
@@ -40,14 +43,17 @@ type Collector struct {
// NewCollector creates a collector that periodically gathers data to send to provers
func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prometheus.Registerer) *Collector {
c := &Collector{
cfg: cfg,
db: db,
ctx: ctx,
stopTimeoutChan: make(chan struct{}),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
challenge: orm.NewChallenge(db),
cfg: cfg,
db: db,
ctx: ctx,
stopChunkTimeoutChan: make(chan struct{}),
stopBatchTimeoutChan: make(chan struct{}),
stopBatchAllChunkReadyChan: make(chan struct{}),
stopCleanChallengeChan: make(chan struct{}),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
challenge: orm.NewChallenge(db),
timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_timeout_checker_run_total",
@@ -83,7 +89,10 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
// Stop all the collector
func (c *Collector) Stop() {
c.stopTimeoutChan <- struct{}{}
c.stopChunkTimeoutChan <- struct{}{}
c.stopBatchTimeoutChan <- struct{}{}
c.stopBatchAllChunkReadyChan <- struct{}{}
c.stopCleanChallengeChan <- struct{}{}
}
// timeoutTask cron check the send task is timeout. if timeout reached, restore the
@@ -113,8 +122,8 @@ func (c *Collector) timeoutBatchProofTask() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopBatchTimeoutChan:
log.Info("the coordinator timeoutBatchProofTask run loop exit")
return
}
}
@@ -146,8 +155,8 @@ func (c *Collector) timeoutChunkProofTask() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopChunkTimeoutChan:
log.Info("the coordinator timeoutChunkProofTask run loop exit")
return
}
}
@@ -253,8 +262,8 @@ func (c *Collector) checkBatchAllChunkReady() {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
case <-c.stopBatchAllChunkReadyChan:
log.Info("the coordinator checkBatchAllChunkReady run loop exit")
return
}
}
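
The reason one stop channel had to become four: a single unbuffered channel wakes only one of the goroutines blocked on it, so the remaining cron loops never exit. A minimal, hypothetical repro of that behavior:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	stop := make(chan struct{})

	for i := 0; i < 4; i++ {
		go func(id int) {
			<-stop // all four loops block on the same channel
			fmt.Println("loop", id, "exited")
		}(i)
	}

	stop <- struct{}{} // a single send wakes exactly one receiver
	time.Sleep(200 * time.Millisecond)
	// Typically prints one "exited" line; the other three loops leak,
	// which is why Stop() now signals one dedicated channel per loop.
}
```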

View File

@@ -21,5 +21,5 @@ func NewLoginLogic(db *gorm.DB) *LoginLogic {
// InsertChallengeString inserts the challenge string and checks whether it already exists
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.challengeOrm.InsertChallenge(ctx, challenge)
return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
}
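
Passing ctx.Copy() instead of ctx is the standard gin pattern whenever the context may be used beyond the handler's own frame, since gin recycles *gin.Context after the request completes. A minimal sketch of that pattern in a vanilla gin app:

```go
package main

import (
	"log"
	"time"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()
	r.GET("/login", func(c *gin.Context) {
		cp := c.Copy() // snapshot: safe to use after the handler returns
		go func() {
			time.Sleep(100 * time.Millisecond)
			log.Println("async work for", cp.ClientIP())
		}()
		c.JSON(200, gin.H{"ok": true})
	})
	_ = r.Run(":8080")
}
```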

View File

@@ -31,16 +31,17 @@ type BatchProverTask struct {
batchAttemptsExceedTotal prometheus.Counter
batchTaskGetTaskTotal *prometheus.CounterVec
batchTaskGetTaskProver *prometheus.CounterVec
}
// NewBatchProverTask creates a new batch prover task collector
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
vkMap: vkMap,
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -58,6 +59,7 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
Name: "coordinator_batch_get_task_total",
Help: "Total number of batch get task.",
}, []string{"fork_name"}),
batchTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "batch"),
}
return bp
}
@@ -69,9 +71,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
hardForkNumber, err := bp.getHardForkNumberByName(getTaskParameter.HardForkName)
hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
return nil, err
}
@@ -81,9 +83,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
var endChunkIndex uint64 = math.MaxInt64
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, fromBlockNum)
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", getTaskParameter.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
@@ -92,9 +94,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
startChunkIndex = startChunk.Index
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, toBlockNum)
if err != nil {
log.Error("failed to get fork end chunk index", "forkName", getTaskParameter.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
@@ -110,7 +112,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBatchTask *orm.Batch
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx, startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -119,7 +121,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why fetch again? To support assigning one task to multiple provers, batches already in `ProvingTaskAssigned`
// must also be handed out. A single `proving_status in (1, 2)` query would not use the postgres index, so the SQL is split in two.
if tmpBatchTask == nil {
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx, startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -131,7 +133,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx, tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
@@ -166,20 +168,25 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx, &proverTask)
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
bp.batchTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
bp.batchTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
}).Inc()
return taskMsg, nil
}
@@ -235,7 +242,7 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *orm.Batch) {
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx, batchTask.Hash); err != nil {
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err)
}
}

View File

@@ -29,15 +29,16 @@ type ChunkProverTask struct {
chunkAttemptsExceedTotal prometheus.Counter
chunkTaskGetTaskTotal *prometheus.CounterVec
chunkTaskGetTaskProver *prometheus.CounterVec
}
// NewChunkProverTask creates a new chunk prover task collector
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
vkMap: vkMap,
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -55,6 +56,7 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
Name: "coordinator_chunk_get_task_total",
Help: "Total number of chunk get task.",
}, []string{"fork_name"}),
chunkTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "chunk"),
}
return cp
}
@@ -66,9 +68,9 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
hardForkNumber, err := cp.getHardForkNumberByName(getTaskParameter.HardForkName)
hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
return nil, err
}
@@ -83,7 +85,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ {
var getTaskError error
var tmpChunkTask *orm.Chunk
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx, fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -92,7 +94,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why fetch again? To support assigning one task to multiple provers, chunks already in `ProvingTaskAssigned`
// must also be handed out. A single `proving_status in (1, 2)` query would not use the postgres index, so the SQL is split in two.
if tmpChunkTask == nil {
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx, fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -104,7 +106,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx, tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
@@ -138,20 +140,25 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx, &proverTask)
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
cp.chunkTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
}).Inc()
return taskMsg, nil
}

View File

@@ -2,8 +2,12 @@ package provertask
import (
"fmt"
"sync"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/version"
@@ -13,11 +17,12 @@ import (
coordinatorType "scroll-tech/coordinator/internal/types"
)
// ErrCoordinatorInternalFailure coordinator internal db failure
var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ErrHardForkName indicates client request with the wrong hard fork name
var ErrHardForkName = fmt.Errorf("wrong hard fork name")
var (
// ErrCoordinatorInternalFailure coordinator internal db failure
ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ErrHardForkName indicates a client request with a wrong hard fork name
ErrHardForkName = fmt.Errorf("wrong hard fork name")
)
// ProverTask is the interface of a collector that sends data to provers
type ProverTask interface {
@@ -28,8 +33,8 @@ type ProverTask interface {
type BaseProverTask struct {
cfg *config.Config
db *gorm.DB
vk string
vkMap map[string]string
nameForkMap map[string]uint64
forkHeights []uint64
@@ -44,6 +49,7 @@ type proverTaskContext struct {
PublicKey string
ProverName string
ProverVersion string
HardForkName string
}
// checkParameter checks whether the prover task parameters are valid
@@ -68,12 +74,24 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
}
ptc.ProverVersion = proverVersion.(string)
hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, fmt.Errorf("get hard fork name from context failed")
}
ptc.HardForkName = hardForkName.(string)
if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
}
vk, vkExist := b.vkMap[ptc.HardForkName]
if !vkExist {
return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
}
// if the prover has a different vk
if getTaskParameter.VK != b.vk {
if getTaskParameter.VK != vk {
log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
// if the prover reports a different prover version
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
@@ -82,7 +100,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx, publicKey.(string))
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s, proverVersion: %s", publicKey, err, proverName, proverVersion)
}
@@ -90,7 +108,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
}
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check if prover %s is assigned a task, err: %w", publicKey.(string), err)
}
@@ -115,3 +133,22 @@ func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error
return hardForkNumber, nil
}
var (
getTaskCounterInitOnce sync.Once
getTaskCounterVec *prometheus.CounterVec = nil
)
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
getTaskCounterInitOnce.Do(func() {
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_get_task_count",
Help: "Multi dimensions get task counter.",
}, []string{"task_type",
coordinatorType.LabelProverName,
coordinatorType.LabelProverPublicKey,
coordinatorType.LabelProverVersion})
})
return getTaskCounterVec.MustCurryWith(prometheus.Labels{"task_type": taskType})
}
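
sync.Once ensures the vector is registered exactly once per process, and MustCurryWith pre-binds task_type, so the chunk and batch controllers each get a narrowed view of the same underlying metric. A self-contained sketch of that currying behavior:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()
	vec := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
		Name: "coordinator_get_task_count",
		Help: "Multi dimensions get task counter.",
	}, []string{"task_type", "prover_name"})

	// Each curried view fixes task_type; only the remaining labels vary.
	chunk := vec.MustCurryWith(prometheus.Labels{"task_type": "chunk"})
	batch := vec.MustCurryWith(prometheus.Labels{"task_type": "batch"})

	chunk.WithLabelValues("alice").Inc()
	batch.WithLabelValues("bob").Inc()

	fmt.Println("both curried views write into the same registered vector")
}
```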

View File

@@ -134,18 +134,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if len(pv) == 0 {
return fmt.Errorf("get ProverVersion from context failed")
}
hardForkName := ctx.GetString(coordinatorType.HardForkName)
var proverTask *orm.ProverTask
var err error
if proofParameter.UUID != "" {
proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx, proofParameter.UUID, pk)
proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
} else {
// TODO: delete this logic once all provers have upgraded
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx, proofMsg.Type, proofMsg.ID, pk, pv)
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx.Copy(), proofMsg.Type, proofMsg.ID, pk, pv)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
@@ -156,29 +157,28 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)
if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
if err = m.validator(ctx.Copy(), proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
return err
}
m.verifierTotal.WithLabelValues(pv).Inc()
var success bool
success := true
var verifyErr error
if proofMsg.Type == message.ProofTypeChunk {
success, verifyErr = m.verifier.VerifyChunkProof(proofMsg.ChunkProof)
} else if proofMsg.Type == message.ProofTypeBatch {
success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof)
// only verify batch proofs; the chunk proof verifier has been disabled since Bernoulli
if proofMsg.Type == message.ProofTypeBatch {
success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName)
}
if verifyErr != nil || !success {
m.verifierFailureTotal.WithLabelValues(pv).Inc()
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
"prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
if verifyErr != nil {
return ErrValidatorFailureVerifiedFailed
@@ -189,12 +189,12 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())
log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)
if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
if err := m.closeProofTask(ctx.Copy(), proverTask, proofMsg, proofTimeSec); err != nil {
m.proofSubmitFailure.Inc()
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
return ErrCoordinatorInternalFailure
}
@@ -221,7 +221,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
return nil
}
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) {
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) {
defer func() {
if err != nil {
m.validateFailureTotal.Inc()
@@ -240,7 +240,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
"cannot submit valid proof for a prover task twice",
"taskType", proverTask.TaskType, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", proverTask.ProverPublicKey,
"proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName,
)
return ErrValidatorFailureProverTaskCannotSubmitTwice
}
@@ -259,7 +259,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
log.Info("proof generated by prover failed",
"taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
"failureMessage", failureMsg)
"failureMessage", failureMsg, "forkName", forkName)
return ErrValidatorFailureProofMsgStatusNotOk
}
@@ -267,13 +267,13 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName)
return ErrValidatorFailureProofTimeout
}
// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
}
@@ -281,7 +281,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
m.validateFailureProverTaskHaveVerifier.Inc()
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil
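
Note the consequence of initializing success to true: chunk proofs now bypass coordinator-side verification entirely, and only batch proofs reach the verifier. A condensed, hypothetical sketch of the resulting control flow with a stubbed verifier:

```go
package main

import "fmt"

type proofType int

const (
	proofTypeChunk proofType = iota + 1
	proofTypeBatch
)

// Stub standing in for verifier.VerifyBatchProof.
func verifyBatchProof(proof []byte, hardForkName string) (bool, error) {
	return string(proof) != "this is a invalid proof", nil
}

func handle(t proofType, proof []byte, hardForkName string) error {
	success := true
	var verifyErr error
	// Only batch proofs are verified; chunk proofs pass through unchecked.
	if t == proofTypeBatch {
		success, verifyErr = verifyBatchProof(proof, hardForkName)
	}
	if verifyErr != nil || !success {
		return fmt.Errorf("verification failed")
	}
	return nil
}

func main() {
	fmt.Println(handle(proofTypeChunk, []byte("anything"), "bernoulli")) // <nil>: chunk skips verification
	fmt.Println(handle(proofTypeBatch, []byte("this is a invalid proof"), "bernoulli"))
}
```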

View File

@@ -9,8 +9,26 @@ import (
)
// NewVerifier Sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
return &Verifier{}, nil
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
batchVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
chunkVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}
// VerifyChunkProof returns a mock verification result for a ChunkProof.
@@ -22,7 +40,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
}
// VerifyBatchProof returns a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}

View File

@@ -9,7 +9,7 @@ const InvalidTestProof = "this is a invalid proof"
// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
cfg *config.VerifierConfig
BatchVK string
ChunkVK string
cfg *config.VerifierConfig
ChunkVKMap map[string]string
BatchVKMap map[string]string
}
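
With verifying keys keyed by fork name, the coordinator can compare the prover-reported VK against the one expected for the fork the prover logged in with (the lookup added to checkParameter earlier in this compare). A toy sketch of that dispatch:

```go
package main

import "fmt"

func main() {
	// Illustrative VK values; real entries are base64-encoded vkey files.
	vkMap := map[string]string{"shanghai": "vk-legacy", "bernoulli": "vk-current"}

	hardForkName := "bernoulli" // from the prover's login claims
	proverVK := "vk-current"    // reported by the prover in GetTaskParameter.VK

	vk, ok := vkMap[hardForkName]
	if !ok {
		panic(fmt.Sprintf("can't get vk for hard fork:%s", hardForkName))
	}
	if proverVK != vk {
		fmt.Println("vk inconsistency: reject the prover")
		return
	}
	fmt.Println("vk matches for", hardForkName)
}
```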

View File

@@ -11,9 +11,11 @@ package verifier
import "C" //nolint:typecheck
import (
"embed"
"encoding/base64"
"encoding/json"
"io"
"io/fs"
"os"
"path"
"unsafe"
@@ -28,7 +30,26 @@ import (
// NewVerifier Sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode {
return &Verifier{cfg: cfg}, nil
batchVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
chunkVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}
paramsPathStr := C.CString(cfg.ParamsPath)
assetsPathStr := C.CString(cfg.AssetsPath)
@@ -40,25 +61,31 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
C.init_batch_verifier(paramsPathStr, assetsPathStr)
C.init_chunk_verifier(paramsPathStr, assetsPathStr)
batchVK, err := readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
v := &Verifier{
cfg: cfg,
ChunkVKMap: make(map[string]string),
BatchVKMap: make(map[string]string),
}
batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
if err != nil {
return nil, err
}
chunkVK, err := readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
if err != nil {
return nil, err
}
v.BatchVKMap[cfg.ForkName] = batchVK
v.ChunkVKMap[cfg.ForkName] = chunkVK
return &Verifier{
cfg: cfg,
BatchVK: batchVK,
ChunkVK: chunkVK,
}, nil
if err := v.loadEmbedVK(); err != nil {
return nil, err
}
return v, nil
}
// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, batch verifier disabled")
if string(proof.Proof) == InvalidTestProof {
@@ -72,13 +99,15 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
return false, err
}
log.Info("Start to verify batch proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
log.Info("Start to verify batch proof ...")
verified := C.verify_batch_proof(proofStr)
verified := C.verify_batch_proof(proofStr, forkNameStr)
return verified != 0, nil
}
@@ -107,7 +136,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
return verified != 0, nil
}
func readVK(filePat string) (string, error) {
func (v *Verifier) readVK(filePat string) (string, error) {
f, err := os.Open(filePat)
if err != nil {
return "", err
@@ -118,3 +147,26 @@ func readVK(filePat string) (string, error) {
}
return base64.StdEncoding.EncodeToString(byt), nil
}
//go:embed legacy_vk/*
var legacyVKFS embed.FS
func (v *Verifier) loadEmbedVK() error {
batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
if err != nil {
log.Error("load embed batch vk failure", "err", err)
return err
}
chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
if err != nil {
log.Error("load embed chunk vk failure", "err", err)
return err
}
v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
return nil
}
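
loadEmbedVK bakes the legacy verifying keys into the binary with go:embed, so provers that log in with fork name "shanghai" (or none at all) can still be matched without extra asset files on disk. A minimal sketch of the embed mechanics, assuming a legacy_vk/ directory sits next to the source file:

```go
package main

import (
	"embed"
	"encoding/base64"
	"fmt"
	"io/fs"
)

//go:embed legacy_vk/*
var legacyVKFS embed.FS

func main() {
	byt, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
	if err != nil {
		panic(err)
	}
	// Same encoding the verifier uses before storing the VK in its map.
	fmt.Println(base64.StdEncoding.EncodeToString(byt))
}
```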

View File

@@ -49,7 +49,7 @@ func TestFFI(t *testing.T) {
t.Log("Verified chunk proof 2")
batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof)
batchOk, err := v.VerifyBatchProof(batchProof, "bernoulli")
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")

View File

@@ -24,6 +24,7 @@ type Batch struct {
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -54,6 +55,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// blob
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -248,6 +253,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
newBatch := Batch{
Index: batch.Index,
Hash: daBatch.Hash().Hex(),
DataHash: daBatch.DataHash.Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: endDAChunkHash.Hex(),
@@ -262,6 +268,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
ActiveAttempts: 0,
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
BlobDataProof: nil, // using a mock value because this code is only used in unit tests
BlobSize: 0, // using a mock value because this code is only used in unit tests
}
db := o.db

View File

@@ -48,6 +48,10 @@ type Chunk struct {
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// blob
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -300,6 +304,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
ProvingStatus: int16(types.ProvingTaskUnassigned),
TotalAttempts: 0,
ActiveAttempts: 0,
CrcMax: 0, // using a mock value because this code is only used in unit tests
BlobSize: 0, // using a mock value because this code is only used in unit tests
}
db := o.db

View File

@@ -9,6 +9,8 @@ const (
ProverName = "prover_name"
// ProverVersion the prover version for context
ProverVersion = "prover_version"
// HardForkName the fork name for context
HardForkName = "hard_fork_name"
)
// Message the login message struct
@@ -16,6 +18,7 @@ type Message struct {
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
}
// LoginParameter for /login api
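Note that hard_fork_name carries no binding:"required" tag, so legacy provers can keep logging in without it. A sketch of the payload a fork-aware prover would send (name and version are illustrative, json import assumed):

msg := Message{
	Challenge:     "<challenge token>",
	ProverVersion: "v4.1.98",  // illustrative
	ProverName:    "prover-1", // illustrative
	HardForkName:  "bernoulli",
}
body, _ := json.Marshal(msg) // legacy provers simply omit hard_fork_name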

View File

@@ -2,7 +2,6 @@ package types
// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
VK string `form:"vk" json:"vk"`

View File

@@ -0,0 +1,10 @@
package types
var (
// LabelProverName label name for prover name; a common label name used in Prometheus metrics, and the same applies to the labels below.
LabelProverName = "prover_name"
// LabelProverPublicKey label name for prover public key
LabelProverPublicKey = "prover_pubkey"
// LabelProverVersion label name for prover version
LabelProverVersion = "prover_version"
)
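These shared names keep the labels on coordinator metrics consistent across collectors. A sketch of wiring them into a Prometheus counter (metric name and label values are hypothetical, prometheus import assumed):

getTaskTotal := prometheus.NewCounterVec(prometheus.CounterOpts{
	Name: "coordinator_get_task_total", // hypothetical metric name
}, []string{LabelProverName, LabelProverPublicKey, LabelProverVersion})
getTaskTotal.WithLabelValues("prover-1", "0x04ab…", "v4.1.98").Inc()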

View File

@@ -96,8 +96,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
ChainID: 111,
},
ProverManager: &config.ProverManager{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{MockMode: true},
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{
MockMode: true,
},
BatchCollectionTimeSec: 10,
ChunkCollectionTimeSec: 10,
MaxVerifierWorkers: 10,
@@ -113,6 +115,8 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
var chainConf params.ChainConfig
for forkName, forkNumber := range nameForkMap {
switch forkName {
case "shanghai":
chainConf.ShanghaiBlock = big.NewInt(forkNumber)
case "bernoulli":
chainConf.BernoulliBlock = big.NewInt(forkNumber)
case "london":
@@ -258,12 +262,12 @@ func testGetTaskBlocked(t *testing.T) {
assert.NoError(t, err)
expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
expectedErr = fmt.Errorf("get empty prover task")
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
@@ -274,12 +278,12 @@ func testGetTaskBlocked(t *testing.T) {
assert.NoError(t, err)
expectedErr = fmt.Errorf("get empty prover task")
code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}
@@ -299,12 +303,12 @@ func testOutdatedProverVersion(t *testing.T) {
assert.True(t, chunkProver.healthCheckSuccess(t))
expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}
@@ -358,7 +362,7 @@ func testHardForkAssignTask(t *testing.T) {
{
name: "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
exceptTaskNumber: 0,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
@@ -448,7 +452,7 @@ func testHardForkAssignTask(t *testing.T) {
{ // hard fork 3, prover1:2 prover2:3 block [2-3]
name: "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -457,7 +461,7 @@ func testHardForkAssignTask(t *testing.T) {
{
name: "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -466,7 +470,7 @@ func testHardForkAssignTask(t *testing.T) {
{ // hard fork 2, prover 2 block [2-3]
name: "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
@@ -534,7 +538,7 @@ func testHardForkAssignTask(t *testing.T) {
continue
}
getTaskNumber++
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success)
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i])
}
assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
})
@@ -577,7 +581,7 @@ func testValidProof(t *testing.T) {
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, proofStatus, types.Success)
provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul")
}
// verify proof status
@@ -643,34 +647,21 @@ func testInvalidProof(t *testing.T) {
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// create mock provers.
provers := make([]*mockProver, 2)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
}
proofType := message.ProofTypeBatch
provingStatus := verifiedFailed
expectErrCode := types.ErrCoordinatorHandleZkProofFailure
prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul")
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul")
// verify proof status
var (
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute)
)
var (
chunkProofStatus types.ProvingStatus
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute)
batchProofStatus types.ProvingStatus
chunkActiveAttempts int16
chunkMaxAttempts int16
batchActiveAttempts int16
batchMaxAttempts int16
)
@@ -678,24 +669,17 @@ func testInvalidProof(t *testing.T) {
for {
select {
case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
if batchProofStatus == types.ProvingTaskAssigned {
return
}
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(batchMaxAttempts))
assert.Equal(t, 0, int(batchActiveAttempts))
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
t.Error("failed to check proof status", "batchProofStatus", batchProofStatus.String())
return
}
}
@@ -735,7 +719,7 @@ func testProofGeneratedFailed(t *testing.T) {
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure)
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul")
}
// verify proof status
@@ -858,14 +842,14 @@ func testTimeoutProof(t *testing.T) {
assert.NotNil(t, proverChunkTask2)
assert.Equal(t, chunkTask2ErrCode, types.Success)
assert.Equal(t, chunkTask2ErrMsg, "")
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success)
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul")
batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version)
proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul")
assert.NotNil(t, proverBatchTask2)
assert.Equal(t, batchTask2ErrCode, types.Success)
assert.Equal(t, batchTask2ErrMsg, "")
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success)
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul")
// verify proof status; it should be verified now because the second prover sent a valid proof
chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)

View File

@@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
}
// connectToCoordinator sets up a websocket client to connect to the prover manager.
func (r *mockProver) connectToCoordinator(t *testing.T) string {
func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string {
challengeString := r.challenge(t)
return r.login(t, challengeString)
return r.login(t, challengeString, forkName)
}
func (r *mockProver) challenge(t *testing.T) string {
@@ -76,18 +76,32 @@ func (r *mockProver) challenge(t *testing.T) string {
return loginData.Token
}
func (r *mockProver) login(t *testing.T, challengeString string) string {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
},
func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string {
var body string
if forkName != "" {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
HardForkName: forkName,
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
} else {
authMsg := message.LegacyAuthMsg{
Identity: &message.LegacyIdentity{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
var result ctypes.Response
client := resty.New()
@@ -137,7 +151,7 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) {
// get task from coordinator
token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)
type response struct {
@@ -151,7 +165,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "hard_fork_name": forkName}).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -160,9 +174,11 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
}
// Testing expected errors returned by coordinator.
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
//
//nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) {
// get task from coordinator
token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)
type response struct {
@@ -185,7 +201,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
return result.ErrCode, result.ErrMsg
}
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) {
proofMsgStatus := message.StatusOk
if proofStatus == generatedFailed {
proofMsgStatus = message.StatusProofError
@@ -228,7 +244,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
submitProof.Proof = string(encodeData)
}
token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)
submitProofData, err := json.Marshal(submitProof)

View File

@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, int64(16), cur)
assert.Equal(t, int64(20), cur)
}
func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(16), cur)
assert.Equal(t, int64(20), cur)
}
func testRollback(t *testing.T) {
version, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(16), version)
assert.Equal(t, int64(20), version)
assert.NoError(t, Rollback(pgDB, nil))

View File

@@ -0,0 +1,27 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE chunk
ADD COLUMN crc_max INTEGER DEFAULT 0,
ADD COLUMN blob_size INTEGER DEFAULT 0;
ALTER TABLE batch
ADD COLUMN data_hash VARCHAR DEFAULT '',
ADD COLUMN blob_data_proof BYTEA DEFAULT NULL,
ADD COLUMN blob_size INTEGER DEFAULT 0;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS batch
DROP COLUMN data_hash,
DROP COLUMN blob_data_proof,
DROP COLUMN blob_size;
ALTER TABLE IF EXISTS chunk
DROP COLUMN crc_max,
DROP COLUMN blob_size;
-- +goose StatementEnd

View File

@@ -0,0 +1,18 @@
-- +goose Up
-- +goose StatementBegin
create index if not exists idx_prover_task_created_at on prover_task(created_at) where deleted_at IS NULL;
create index if not exists idx_prover_task_task_id on prover_task(task_id) where deleted_at IS NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop index if exists idx_prover_task_created_at;
drop index if exists idx_prover_task_task_id;
-- +goose StatementEnd
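Both indexes are partial on deleted_at IS NULL, which matches the soft-delete filter GORM appends to queries on this table, so lookups by task_id and created_at-ordered scans can use them. A sketch of the kind of query the task_id index serves (model name assumed):

// GORM adds "deleted_at IS NULL" for soft-deleted models, matching the index predicate.
var tasks []ProverTask // assumed GORM model for the prover_task table
err := db.WithContext(ctx).Where("task_id = ?", taskID).Find(&tasks).Error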

View File

@@ -0,0 +1,15 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE l1_block
ADD COLUMN blob_base_fee BIGINT DEFAULT 0;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS l1_block
DROP COLUMN blob_base_fee;
-- +goose StatementEnd

View File

@@ -0,0 +1,17 @@
-- +goose Up
-- +goose StatementBegin
DROP INDEX if exists idx_prover_block_list_on_public_key;
CREATE UNIQUE INDEX if not exists uniq_prover_block_list_on_public_key ON prover_block_list(public_key) where deleted_at IS NULL;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
CREATE INDEX if not exists idx_prover_block_list_on_public_key ON prover_block_list(public_key);
DROP INDEX if exists uniq_prover_block_list_on_public_key;
-- +goose StatementEnd
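Creating the unique index fails if live duplicates already exist, so a pre-check along these lines (not part of the migration, GORM assumed) can be run before rolling it out:

// List public keys that would violate the new unique index.
var dup []string
err := db.Raw(
	"SELECT public_key FROM prover_block_list WHERE deleted_at IS NULL GROUP BY public_key HAVING COUNT(*) > 1",
).Scan(&dup).Error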

View File

@@ -21,14 +21,15 @@ import (
type CoordinatorClient struct {
client *resty.Client
proverName string
priv *ecdsa.PrivateKey
proverName string
hardForkName string
priv *ecdsa.PrivateKey
mu sync.Mutex
}
// NewCoordinatorClient constructs a new CoordinatorClient.
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, hardForkName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
client := resty.New().
SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second).
SetRetryCount(cfg.RetryCount).
@@ -50,9 +51,10 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
"retry wait time (second)", cfg.RetryWaitTimeSec)
return &CoordinatorClient{
client: client,
proverName: proverName,
priv: priv,
client: client,
proverName: proverName,
hardForkName: hardForkName,
priv: priv,
}, nil
}
@@ -83,6 +85,7 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
ProverVersion: version.Version,
ProverName: c.proverName,
Challenge: challengeResult.Data.Token,
HardForkName: c.hardForkName,
},
}
@@ -97,10 +100,12 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
Challenge string `json:"challenge"`
ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
}{
Challenge: authMsg.Identity.Challenge,
ProverName: authMsg.Identity.ProverName,
ProverVersion: authMsg.Identity.ProverVersion,
HardForkName: authMsg.Identity.HardForkName,
},
Signature: authMsg.Signature,
}

View File

@@ -25,6 +25,7 @@ type LoginRequest struct {
Challenge string `json:"challenge"`
ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
} `json:"message"`
Signature string `json:"signature"`
}
@@ -41,7 +42,6 @@ type LoginResponse struct {
// GetTaskRequest defines the request structure for GetTask API
type GetTaskRequest struct {
HardForkName string `json:"hard_fork_name"`
TaskType message.ProofType `json:"task_type"`
ProverHeight uint64 `json:"prover_height,omitempty"`
VK string `json:"vk"`

View File

@@ -82,7 +82,7 @@ func NewProver(ctx context.Context, cfg *config.Config) (*Prover, error) {
}
log.Info("init prover_core successfully!")
coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, priv)
coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, cfg.HardForkName, priv)
if err != nil {
return nil, err
}
@@ -178,8 +178,7 @@ func (r *Prover) proveAndSubmit() error {
func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
// prepare the request
req := &client.GetTaskRequest{
HardForkName: r.cfg.HardForkName,
TaskType: r.Type(),
TaskType: r.Type(),
// We may not be able to get the vk the first time, so we pass the vk to the coordinator on every getTask
// instead of passing it at login.
VK: r.proverCore.VK,

View File

@@ -24,7 +24,7 @@ rollup_relayer: ## Builds the rollup_relayer bin
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
lint: ## Lint the files - used for CI
lint: mock_abi ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
clean: ## Empty out the bin folder

View File

@@ -76,7 +76,7 @@ func action(ctx *cli.Context) error {
}
})
log.Info("Start event-watcher successfully")
log.Info("Start event-watcher successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

View File

@@ -78,9 +78,15 @@ func action(ctx *cli.Context) error {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := utils.ReadGenesis(genesisPath)
if err != nil {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, genesis.Config, relayer.ServiceTypeL1GasOracle, registry)
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
}
@@ -109,7 +115,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions
log.Info("Start gas-oracle successfully")
log.Info("Start gas-oracle successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

View File

@@ -115,7 +115,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")
log.Info("Start rollup-relayer successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

View File

@@ -19,7 +19,9 @@
},
"gas_oracle_config": {
"min_gas_price": 0,
"gas_price_diff": 50000
"gas_price_diff": 50000,
"l1_base_fee_weight": 0.132,
"l1_blob_base_fee_weight": 0.145
},
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
}
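With these weights, the oracle relays a blend of the two L1 fees once Bernoulli is active. An illustrative computation (numbers invented for the example, math import assumed):

// 30 gwei base fee and 10 gwei blob base fee:
blended := uint64(math.Ceil(0.132*float64(30_000_000_000) + 0.145*float64(10_000_000_000)))
_ = blended // ≈ 5.41 gwei: 3.96 gwei from the base fee plus 1.45 gwei from the blob base fee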

View File

@@ -3,7 +3,7 @@ module scroll-tech/rollup
go 1.21
require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/agiledragon/gomonkey/v2 v2.11.0
github.com/consensys/gnark-crypto v0.12.1
github.com/crate-crypto/go-kzg-4844 v0.7.0
github.com/gin-gonic/gin v1.9.1

View File

@@ -2,8 +2,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/agiledragon/gomonkey/v2 v2.11.0 h1:5oxSgA+tC1xuGsrIorR+sYiziYltmJyEZ9qA25b6l5U=
github.com/agiledragon/gomonkey/v2 v2.11.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=

View File

@@ -70,8 +70,14 @@ type RelayerConfig struct {
type GasOracleConfig struct {
// MinGasPrice store the minimum gas price to set.
MinGasPrice uint64 `json:"min_gas_price"`
// GasPriceDiff store the percentage of gas price difference.
// GasPriceDiff is the minimum percentage of gas price difference to update gas oracle.
GasPriceDiff uint64 `json:"gas_price_diff"`
// The following configs are only for updating L1 gas price, used for sender in L2.
// The weight for L1 base fee.
L1BaseFeeWeight float64 `json:"l1_base_fee_weight"`
// The weight for L1 blob base fee.
L1BlobBaseFeeWeight float64 `json:"l1_blob_base_fee_weight"`
}
// relayerConfigAlias RelayerConfig alias name

View File

@@ -3,12 +3,14 @@ package relayer
import (
"context"
"fmt"
"math"
"math/big"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/types"
@@ -28,21 +30,26 @@ import (
type Layer1Relayer struct {
ctx context.Context
cfg *config.RelayerConfig
cfg *config.RelayerConfig
chainCfg *params.ChainConfig
gasOracleSender *sender.Sender
l1GasOracleABI *abi.ABI
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
l1BaseFeeWeight float64
l1BlobBaseFeeWeight float64
l1BlockOrm *orm.L1Block
metrics *l1RelayerMetrics
l2BlockOrm *orm.L2Block
metrics *l1RelayerMetrics
}
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, serviceType ServiceType, reg prometheus.Registerer) (*Layer1Relayer, error) {
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, serviceType ServiceType, reg prometheus.Registerer) (*Layer1Relayer, error) {
var gasOracleSender *sender.Sender
var err error
@@ -74,14 +81,18 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
l1Relayer := &Layer1Relayer{
cfg: cfg,
chainCfg: chainCfg,
ctx: ctx,
l1BlockOrm: orm.NewL1Block(db),
l2BlockOrm: orm.NewL2Block(db),
gasOracleSender: gasOracleSender,
l1GasOracleABI: bridgeAbi.L1GasPriceOracleABI,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
l1BaseFeeWeight: cfg.GasOracleConfig.L1BaseFeeWeight,
l1BlobBaseFeeWeight: cfg.GasOracleConfig.L1BlobBaseFeeWeight,
}
l1Relayer.metrics = initL1RelayerMetrics(reg)
@@ -123,12 +134,27 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
if r.lastGasPrice > 0 && expectedDelta == 0 {
expectedDelta = 1
}
// last is undefine or (block.BaseFee >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (block.BaseFee >= r.minGasPrice && (block.BaseFee >= r.lastGasPrice+expectedDelta || block.BaseFee <= r.lastGasPrice-expectedDelta)) {
baseFee := big.NewInt(int64(block.BaseFee))
data, err := r.l1GasOracleABI.Pack("setL1BaseFee", baseFee)
latestL2Height, err := r.l2BlockOrm.GetL2BlocksLatestHeight(r.ctx)
if err != nil {
log.Warn("Failed to fetch latest L2 block height from db", "err", err)
return
}
var isBernoulli = r.chainCfg.IsBernoulli(new(big.Int).SetUint64(latestL2Height))
var baseFee uint64
if isBernoulli && block.BlobBaseFee != 0 {
baseFee = uint64(math.Ceil(r.l1BaseFeeWeight*float64(block.BaseFee) + r.l1BlobBaseFeeWeight*float64(block.BlobBaseFee)))
} else {
baseFee = block.BaseFee
}
// last is undefined or (baseFee >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (baseFee >= r.minGasPrice && (baseFee >= r.lastGasPrice+expectedDelta || baseFee <= r.lastGasPrice-expectedDelta)) {
data, err := r.l1GasOracleABI.Pack("setL1BaseFee", new(big.Int).SetUint64(baseFee))
if err != nil {
log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", block.BaseFee, "err", err)
log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "isBernoulli", isBernoulli, "err", err)
return
}
@@ -143,9 +169,9 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return
}
r.lastGasPrice = block.BaseFee
r.lastGasPrice = baseFee
r.metrics.rollupL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "isBernoulli", isBernoulli)
}
}
}
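The update condition above acts as a hysteresis band: a transaction is sent only on the first observation, or when the blended fee is at least minGasPrice and moves outside ±expectedDelta of the last relayed price. A worked check with illustrative values:

last, delta, minGasPrice := uint64(5_000_000_000), uint64(250_000_000), uint64(1_000_000_000)
fee := uint64(5_200_000_000) // 5.2 gwei
shouldUpdate := last == 0 || (fee >= minGasPrice && (fee >= last+delta || fee <= last-delta))
_ = shouldUpdate // false: 5.2 gwei sits inside the 4.75–5.25 gwei band, so the oracle stays quiet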

View File

@@ -8,6 +8,7 @@ import (
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/params"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
@@ -35,7 +36,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewL1Relayer(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, ServiceTypeL1GasOracle, nil)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, ServiceTypeL1GasOracle, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
@@ -57,7 +58,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, ServiceTypeL1GasOracle, nil)
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, &params.ChainConfig{}, ServiceTypeL1GasOracle, nil)
assert.NoError(t, err)
defer l1Relayer.StopSenders()
@@ -90,7 +91,7 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, ServiceTypeL1GasOracle, nil)
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, &params.ChainConfig{}, ServiceTypeL1GasOracle, nil)
assert.NoError(t, err)
assert.NotNil(t, l1Relayer)
defer l1Relayer.StopSenders()

View File

@@ -578,13 +578,31 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
return err
}
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash)
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
// record and sync with db, @todo handle db error
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupFinalizing); err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err)
return err
}
// Update the proving status when finalizing without proof, so the coordinator can skip this task.
// It isn't a necessary step, so don't put it in the same transaction as UpdateFinalizeTxHashAndRollupStatus.
if !withProof {
txErr := r.db.Transaction(func(tx *gorm.DB) error {
if updateErr := r.batchOrm.UpdateProvingStatus(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
return updateErr
}
if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
return updateErr
}
return nil
})
if txErr != nil {
log.Error("Updating chunk and batch proving status when finalizing without proof failure", "batchHash", dbBatch.Hash, "err", txErr)
}
}
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
return nil
}

View File

@@ -7,6 +7,7 @@ import (
"net/http"
"strings"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/gin-gonic/gin"
@@ -181,9 +182,9 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)
batch := &encoding.Batch{
@@ -200,11 +201,30 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
assert.NoError(t, err)
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
time.Sleep(time.Second)
batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
if batchErr != nil {
return false
}
chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
if chunkErr != nil {
return false
}
batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing &&
types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified
chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified
return batchStatus && chunkStatus
})
assert.True(t, ok)
relayer.StopSenders()

View File

@@ -9,6 +9,7 @@ import (
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/consensus/misc"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
@@ -135,10 +136,16 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
baseFee = block.BaseFee.Uint64()
}
var blobBaseFee uint64
if excess := block.ExcessBlobGas; excess != nil {
blobBaseFee = misc.CalcBlobFee(*excess).Uint64()
}
l1Block := orm.L1Block{
Number: blockHeight,
Hash: block.Hash().String(),
BaseFee: baseFee,
BlobBaseFee: blobBaseFee,
GasOracleStatus: int16(types.GasOraclePending),
}
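CalcBlobFee implements the EIP-4844 fee rule: the blob base fee is approximately e^(excess_blob_gas / 3338477) wei, with a floor of 1 wei when there is no excess. A small sketch (misc import as above):

fee := misc.CalcBlobFee(uint64(0))               // 1 wei: the floor with no excess blob gas
high := misc.CalcBlobFee(uint64(10 * 3_338_477)) // ≈ e^10 ≈ 22_026 wei
_, _ = fee, high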

View File

@@ -25,6 +25,7 @@ type Batch struct {
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -53,6 +54,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// blob
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"`
@@ -257,6 +262,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
newBatch := Batch{
Index: batch.Index,
Hash: batchMeta.BatchHash.Hex(),
DataHash: batchMeta.BatchDataHash.Hex(),
StartChunkHash: batchMeta.StartChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: batchMeta.EndChunkHash.Hex(),
@@ -271,6 +277,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
OracleStatus: int16(types.GasOraclePending),
TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
BlobDataProof: batchMeta.BatchBlobDataProof,
BlobSize: metrics.L1CommitBlobSize,
}
db := o.db

View File

@@ -44,6 +44,10 @@ type Chunk struct {
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// blob
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -140,6 +144,20 @@ func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) (
return chunks, nil
}
// GetChunksByBatchHash retrieves chunks by batch hash
// only used in tests
func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash = ?", batchHash)
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetChunksByBatchHash error: %w", err)
}
return chunks, nil
}
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
@@ -198,6 +216,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
ParentChunkStateRoot: parentChunkStateRoot,
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
CrcMax: metrics.CrcMax,
BlobSize: metrics.L1CommitBlobSize,
}
db := o.db
@@ -242,6 +262,34 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
return nil
}
// UpdateProvingStatusByBatchHash updates the proving_status for chunks within the specified batch_hash
func (o *Chunk) UpdateProvingStatusByBatchHash(ctx context.Context, batchHash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash = ?", batchHash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatusByBatchHash error: %w, batch hash: %v, status: %v", err, batchHash, status.String())
}
return nil
}
// UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end indices.
func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error {
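UpdateProvingStatusByBatchHash is what lets finalizeBatch mark every chunk of a batch verified in one statement; the variadic dbTX parameter lets callers join an enclosing transaction, as the relayer does. A usage sketch:

err := db.Transaction(func(tx *gorm.DB) error {
	// join the surrounding transaction via the optional dbTX parameter
	return chunkOrm.UpdateProvingStatusByBatchHash(ctx, batchHash, types.ProvingTaskVerified, tx)
})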

View File

@@ -15,9 +15,10 @@ type L1Block struct {
db *gorm.DB `gorm:"column:-"`
// block
Number uint64 `json:"number" gorm:"column:number"`
Hash string `json:"hash" gorm:"column:hash"`
BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"`
Number uint64 `json:"number" gorm:"column:number"`
Hash string `json:"hash" gorm:"column:hash"`
BaseFee uint64 `json:"base_fee" gorm:"column:base_fee"`
BlobBaseFee uint64 `json:"blob_base_fee" gorm:"column:blob_base_fee"`
// oracle
GasOracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`

View File

@@ -191,10 +191,12 @@ func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, code
// BatchMetadata represents the metadata of a batch.
type BatchMetadata struct {
BatchHash common.Hash
BatchBytes []byte
StartChunkHash common.Hash
EndChunkHash common.Hash
BatchHash common.Hash
BatchDataHash common.Hash
BatchBlobDataProof []byte
BatchBytes []byte
StartChunkHash common.Hash
EndChunkHash common.Hash
}
// GetBatchMetadata retrieves the metadata of a batch.
@@ -212,9 +214,11 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
}
// BatchBlobDataProof is left empty for codecv0.
batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchBytes: daBatch.Encode(),
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash,
BatchBytes: daBatch.Encode(),
}
startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
@@ -243,9 +247,16 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
}
blobDataProof, err := daBatch.BlobDataProof()
if err != nil {
return nil, fmt.Errorf("failed to get codecv1 blob data proof: %w", err)
}
batchMeta := &BatchMetadata{
BatchHash: daBatch.Hash(),
BatchBytes: daBatch.Encode(),
BatchHash: daBatch.Hash(),
BatchDataHash: daBatch.DataHash,
BatchBlobDataProof: blobDataProof,
BatchBytes: daBatch.Encode(),
}
startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
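GetBatchMetadata now also surfaces the batch data hash and, for codecv1, the KZG blob-data proof. A sketch of consuming it (the CodecV1 constant is assumed from the encoding package):

meta, err := GetBatchMetadata(batch, encoding.CodecV1) // CodecV1 assumed
if err != nil {
	return nil, err
}
// meta.BatchDataHash feeds the new batch.data_hash column;
// meta.BatchBlobDataProof is populated for codecv1 and left empty for codecv0.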

View File

@@ -28,7 +28,7 @@ func testImportL1GasPrice(t *testing.T) {
l1Cfg := rollupApp.Config.L1Config
// Create L1Relayer
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, relayer.ServiceTypeL1GasOracle, nil)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, &params.ChainConfig{}, relayer.ServiceTypeL1GasOracle, nil)
assert.NoError(t, err)
defer l1Relayer.StopSenders()

View File

@@ -21,7 +21,7 @@ func testProcessStart(t *testing.T) {
defer database.CloseDB(db)
rollupApp.RunApp(t, cutils.EventWatcherApp)
rollupApp.RunApp(t, cutils.GasOracleApp)
rollupApp.RunApp(t, cutils.GasOracleApp, "--genesis", "../conf/genesis.json")
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--genesis", "../conf/genesis.json")
rollupApp.WaitExit()
@@ -39,7 +39,7 @@ func testProcessStartEnableMetrics(t *testing.T) {
port, err = rand.Int(rand.Reader, big.NewInt(10000))
assert.NoError(t, err)
svrPort = strconv.FormatInt(port.Int64()+20000, 10)
rollupApp.RunApp(t, cutils.GasOracleApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)
rollupApp.RunApp(t, cutils.GasOracleApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort, "--genesis", "../conf/genesis.json")
port, err = rand.Int(rand.Reader, big.NewInt(10000))
assert.NoError(t, err)

View File

@@ -21,6 +21,7 @@ type Batch struct {
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -49,6 +50,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// blob
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -151,6 +156,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
newBatch := Batch{
Index: batch.Index,
Hash: daBatch.Hash().Hex(),
DataHash: daBatch.DataHash.Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: endDAChunkHash.Hex(),
@@ -163,6 +169,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
BlobDataProof: nil, // using a mock value because this piece of code is only used in unit tests
BlobSize: 0, // using a mock value because this piece of code is only used in unit tests
}
db := o.db

Some files were not shown because too many files have changed in this diff.