mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits: 69 commits, v4.4.18 ... curie-test
| SHA1 |
|---|
| d8be3f95a9 |
| 30653f2d6c |
| 8f7548e0bc |
| 3be39fa807 |
| 37f875924d |
| 07f1691c92 |
| a3879f30ef |
| 060cb2570d |
| 8b83d28963 |
| c0a6d2b304 |
| c36fcc958e |
| ea9c9f0c07 |
| 4cfe815b1d |
| 0f7e74ff70 |
| 224089a625 |
| 657a252ab4 |
| 39a9c1f80b |
| 14d2f0c06c |
| 90d87739f7 |
| 4dd086e481 |
| bda2219631 |
| c773457ece |
| 84aedfcee7 |
| 03041420ec |
| e417322635 |
| e657d55498 |
| b215558a42 |
| 9dca413294 |
| 2ba544a6f2 |
| 667e906c64 |
| a84c18e79f |
| 900e161cfc |
| 16691a12a3 |
| 191078b6c5 |
| c9e82b5eea |
| 1dda44d8bf |
| 4fd4adf330 |
| f023e148f9 |
| 7d5f90bfe5 |
| 9ec78b4b99 |
| 3ebc4cb2b6 |
| b9e9457577 |
| aa01ea96fa |
| 4975a078fa |
| cec4467dcd |
| 43a72bc352 |
| daedc98b29 |
| 80bb47de64 |
| 5792d950c0 |
| f430a8e98b |
| 2a066bac2a |
| 56db1a8de1 |
| 072e7ec77f |
| f4d26e955d |
| 221cec132d |
| 7a1fe906bc |
| 8be3a6d3b5 |
| 785d2190d0 |
| 78e5b50b9a |
| c3909d1dd2 |
| 7cc7de4b56 |
| bd9e999292 |
| cf83df64a0 |
| c580cd5c95 |
| 3eb08b7ad4 |
| 5a6d5bf267 |
| dd2ee77c0f |
| 4240be9dec |
| a93a23db21 |
.github/workflows/docker.yml (vendored, 53 lines changed)

@@ -46,7 +46,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
platforms: linux/amd64,linux/arm64
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}

@@ -91,7 +91,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
platforms: linux/amd64,linux/arm64
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}

@@ -136,7 +136,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
platforms: linux/amd64,linux/arm64
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}

@@ -279,51 +279,6 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

bridgehistoryapi-db-cli:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if not exist
env:
REPOSITORY: bridgehistoryapi-db-cli
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: bridgehistoryapi-db-cli
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

coordinator-api:
runs-on: ubuntu-latest
steps:

@@ -361,7 +316,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
platforms: linux/amd64,linux/arm64
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
.github/workflows/intermediate-docker.yml (vendored, 255 lines changed)

@@ -4,65 +4,32 @@ on:
workflow_dispatch:
inputs:
GO_VERSION:
description: "Go version"
description: 'Go version'
required: true
type: choice
options:
- "1.20"
- "1.21"
- "1.22"
- "1.23"
default: "1.21"
type: string
default: '1.21'
RUST_VERSION:
description: "Rust toolchain version"
description: 'Rust toolchain version'
required: true
type: choice
options:
- nightly-2023-12-03
- nightly-2022-12-10
default: "nightly-2023-12-03"
type: string
default: 'nightly-2023-12-03'
PYTHON_VERSION:
description: "Python version"
description: 'Python version'
required: false
type: choice
options:
- "3.10"
default: "3.10"
type: string
default: '3.10'
CUDA_VERSION:
description: "Cuda version"
description: 'Cuda version'
required: false
type: choice
options:
- "11.7.1"
- "12.2.2"
default: "11.7.1"
CARGO_CHEF_TAG:
description: "Cargo chef version"
required: true
default: "0.1.41"
type: choice
options:
- 0.1.41
BASE_IMAGE:
description: "which intermediate image you want to update"
required: true
default: "go-alpine-builder"
type: choice
options:
- cuda-go-rust-builder
- go-rust-builder
- go-alpine-builder
- rust-builder
- rust-alpine-builder
- go-rust-alpine-builder
- py-runner
type: string
default: '11.7.1'

defaults:
run:
working-directory: "build/dockerfiles/intermediate"
working-directory: 'build/dockerfiles/intermediate'

jobs:
build-and-publish-intermediate:
build-and-publish-cuda-go-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -76,37 +43,177 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: set tag env
run: |
if [ ${{github.event.inputs.BASE_IMAGE}} == "cuda-go-rust-builder" ]; then
echo "TAG=cuda-${{ github.event.inputs.CUDA_VERSION }}-go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-rust-builder" ]; then
echo "TAG=go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-alpine-builder" ]; then
echo "TAG=${{ github.event.inputs.GO_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "rust-builder" ]; then
echo "TAG=${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "rust-alpine-builder" ]; then
echo "TAG=${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-rust-alpine-builder" ]; then
echo "TAG=go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "py-runner" ]; then
echo "TAG=${{ github.event.inputs.PYTHON_VERSION }}" >> $GITHUB_ENV
else
echo "no BASE_IMAGE match"
fi
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/${{ github.event.inputs.BASE_IMAGE }}.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/${{ github.event.inputs.BASE_IMAGE }}:${{ env.TAG }}
file: build/dockerfiles/intermediate/cuda-go-rust-builder.Dockerfile
tags: scrolltech/cuda-go-rust-builder:cuda-${{ github.event.inputs.CUDA_VERSION }}-go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
CUDA_VERSION=${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION=${{ github.event.inputs.GO_VERSION }}
RUST_VERSION=${{ github.event.inputs.RUST_VERSION }}
PYTHON_VERSION=${{ github.event.inputs.PYTHON_VERSION }}
CARGO_CHEF_TAG=${{ github.event.inputs.CARGO_CHEF_TAG }}
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}

build-and-publish-go-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}

build-and-publish-go-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-alpine-builder:${{ github.event.inputs.GO_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}

build-and-publish-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}

build-and-publish-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-alpine-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}

build-and-publish-go-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-alpine-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}

build-and-publish-py-runner:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/py-runner.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/py-runner:${{ github.event.inputs.PYTHON_VERSION }}
build-args: |
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
.github/workflows/prover.yml (vendored, 137 lines changed)

@@ -25,75 +25,78 @@ defaults:
working-directory: 'prover'

jobs:
skip_check:
test:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5
with:
cancel_others: 'true'
concurrent_skipping: 'same_content_newer'
paths_ignore: '["**/README.md"]'

fmt:
needs: [skip_check]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: rustfmt
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Cargo check
run: cargo check --all-features
- name: Cargo fmt
run: cargo fmt --all -- --check

clippy:
needs: [skip_check, fmt]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: clippy
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Run clippy
run: cargo clippy --all-features --all-targets -- -D warnings

- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Test
run: |
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: prover
compile:
needs: [skip_check, clippy]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Test
run: |
make prover
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Test
run: |
make prover
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
.github/workflows/rollup.yml (vendored, 2 lines changed)

@@ -105,7 +105,7 @@ jobs:
- name: Test rollup packages
working-directory: 'rollup'
run: |
make test
./run_test.sh
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
.gitignore (vendored, 4 lines changed)

@@ -4,8 +4,6 @@ assets/seed

# Built binaries
build/bin
verifier.test
core.test

coverage.txt
*.integration.txt

@@ -22,5 +20,3 @@ coverage.txt
# misc
sftp-config.json
*~

target
README.md (12 lines changed)

@@ -46,7 +46,19 @@ make dev_docker
Run the tests using the following commands:

```bash
export LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
export SCROLL_LIB_PATH=/scroll/lib

sudo mkdir -p $SCROLL_LIB_PATH

sudo wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
sudo wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SCROLL_LIB_PATH
export CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"

go test -v -race -covermode=atomic scroll-tech/rollup/...

go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
@@ -13,6 +13,7 @@ RUN cargo chef cook --release --recipe-path recipe.json

COPY ./common/libzkp/impl .
RUN cargo build --release
RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/

# Download Go dependencies
@@ -34,6 +35,7 @@ FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/

# Pull coordinator into a second stage deploy alpine container

@@ -1,3 +1,6 @@
ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
ARG SCROLL_LIB_PATH=/scroll/lib

# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base

@@ -15,14 +18,38 @@ RUN go mod download -x
# Build event_watcher
FROM base as builder

ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH

RUN mkdir -p $SCROLL_LIB_PATH

RUN apt-get -qq update && apt-get -qq install -y wget

RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/event_watcher/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/event_watcher
cd /src/rollup/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher

# Pull event_watcher into a second stage deploy alpine container
FROM ubuntu:20.04

ENV CGO_LDFLAGS="-ldl"
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH

RUN mkdir -p $SCROLL_LIB_PATH

RUN apt-get -qq update && apt-get -qq install -y wget

RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"

COPY --from=builder /bin/event_watcher /bin/
WORKDIR /app

@@ -1,3 +1,6 @@
ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
ARG SCROLL_LIB_PATH=/scroll/lib

# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base

@@ -15,14 +18,38 @@ RUN go mod download -x
# Build gas_oracle
FROM base as builder

ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH

RUN mkdir -p $SCROLL_LIB_PATH

RUN apt-get -qq update && apt-get -qq install -y wget

RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/gas_oracle/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/gas_oracle
cd /src/rollup/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle

# Pull gas_oracle into a second stage deploy alpine container
FROM ubuntu:20.04

ENV CGO_LDFLAGS="-ldl"
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH

RUN mkdir -p $SCROLL_LIB_PATH

RUN apt-get -qq update && apt-get -qq install -y wget

RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"

COPY --from=builder /bin/gas_oracle /bin/
WORKDIR /app

@@ -29,14 +29,7 @@ RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
# Install Go
ARG GO_VERSION
RUN rm -rf /usr/local/go
RUN if [ "$(uname -m)" = "x86_64" ]; then \
echo amd64 >/tmp/arch; \
elif [ "$(uname -m)" = "aarch64" ]; then \
echo arm64 >/tmp/arch; \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-amd64.tar.gz
RUN rm go${GO_VERSION}.1.linux-amd64.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"

@@ -25,14 +25,7 @@ RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
# Install Go
ARG GO_VERSION
RUN rm -rf /usr/local/go
RUN if [ "$(uname -m)" = "x86_64" ]; then \
echo amd64 >/tmp/arch; \
elif [ "$(uname -m)" = "aarch64" ]; then \
echo arm64 >/tmp/arch; \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-amd64.tar.gz
RUN rm go${GO_VERSION}.1.linux-amd64.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"

@@ -1,3 +1,6 @@
ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
ARG SCROLL_LIB_PATH=/scroll/lib

# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base

@@ -15,14 +18,38 @@ RUN go mod download -x
# Build rollup_relayer
FROM base as builder

ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH

RUN mkdir -p $SCROLL_LIB_PATH

RUN apt-get -qq update && apt-get -qq install -y wget

RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"

RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/rollup_relayer/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
cd /src/rollup/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer

# Pull rollup_relayer into a second stage deploy alpine container
FROM ubuntu:20.04

ENV CGO_LDFLAGS="-ldl"
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH

RUN mkdir -p $SCROLL_LIB_PATH

RUN apt-get -qq update && apt-get -qq install -y wget

RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"

COPY --from=builder /bin/rollup_relayer /bin/
WORKDIR /app
common/libzkp/.gitignore (vendored, 4 lines changed)

@@ -1,4 +0,0 @@
outputs
libzkp.so
test_zkp_test/
*log

@@ -1,44 +0,0 @@
set -xeu
set -o pipefail

export CHAIN_ID=534352
export RUST_BACKTRACE=full
export RUST_LOG=debug
export RUST_MIN_STACK=100000000
export PROVER_OUTPUT_DIR=test_zkp_test
#export LD_LIBRARY_PATH=/:/usr/local/cuda/lib64

mkdir -p $PROVER_OUTPUT_DIR

REPO=$(realpath ../..)

function build_test_bins() {
cd impl
cargo build --release
ln -f -s $(realpath target/release/libzkp.so) $REPO/prover/core/lib
ln -f -s $(realpath target/release/libzkp.so) $REPO/coordinator/internal/logic/verifier/lib
cd $REPO/prover
go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
cd $REPO/coordinator
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd $REPO/common/libzkp
}

function build_test_bins_old() {
cd $REPO
cd prover
make libzkp
go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
cd ..
cd coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd ..
cd common/libzkp
}

build_test_bins
#rm -rf test_zkp_test/*
#rm -rf prover.log verifier.log
#$REPO/prover/core.test -test.v 2>&1 | tee prover.log
$REPO/coordinator/verifier.test -test.v 2>&1 | tee verifier.log
common/libzkp/impl/Cargo.lock (generated, 503 lines changed)
File diff suppressed because it is too large.
@@ -25,7 +25,7 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i
[dependencies]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.1", default-features = false, features = ["parallel_syn", "scroll"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }

base64 = "0.13.0"
env_logger = "0.9.0"

@@ -1,8 +1,5 @@
.PHONY: help fmt clippy test test-ci test-all

build:
@cargo build --release

fmt:
@cargo fmt --all -- --check
@@ -8,10 +8,9 @@ use crate::{
use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
check_chunk_hashes,
consts::AGG_VK_FILENAME,
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, BatchProvingTask, BlockTrace, ChunkInfo, ChunkProof,
BatchProof, BlockTrace, ChunkHash, ChunkProof,
};
use snark_verifier_sdk::verify_evm_calldata;
use std::{cell::OnceCell, env, ptr::null};
@@ -80,7 +79,7 @@ pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *con

let prover_ref = PROVER.get().expect("failed to get reference to PROVER.");

let valid = prover_ref.check_protocol_of_chunks(&chunk_proofs);
let valid = prover_ref.check_chunk_proofs(&chunk_proofs);
Ok(valid)
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
@@ -109,7 +108,7 @@ pub unsafe extern "C" fn gen_batch_proof(
let chunk_hashes = c_char_to_vec(chunk_hashes);
let chunk_proofs = c_char_to_vec(chunk_proofs);

let chunk_hashes = serde_json::from_slice::<Vec<ChunkInfo>>(&chunk_hashes)
let chunk_hashes = serde_json::from_slice::<Vec<ChunkHash>>(&chunk_hashes)
.map_err(|e| format!("failed to deserialize chunk hashes: {e:?}"))?;
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
@@ -119,19 +118,15 @@ pub unsafe extern "C" fn gen_batch_proof(
chunk_hashes.len(), chunk_proofs.len()));
}

let chunk_hashes_proofs: Vec<(_,_)> = chunk_hashes
let chunk_hashes_proofs = chunk_hashes
.into_iter()
.zip(chunk_proofs.clone())
.zip(chunk_proofs)
.collect();
check_chunk_hashes("", &chunk_hashes_proofs).map_err(|e| format!("failed to check chunk info: {e:?}"))?;

let batch = BatchProvingTask {
chunk_proofs
};
let proof = PROVER
.get_mut()
.expect("failed to get mutable reference to PROVER.")
.gen_agg_evm_proof(batch, None, OUTPUT_DIR.as_deref())
.gen_agg_evm_proof(chunk_hashes_proofs, None, OUTPUT_DIR.as_deref())
.map_err(|e| format!("failed to generate proof: {e:?}"))?;

serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
@@ -162,18 +157,19 @@ pub unsafe extern "C" fn verify_batch_proof(
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
let fork_name_str = c_char_to_str(fork_name);
let fork_id = match fork_name_str {
"bernoulli" => 2,
"curie" => 3,
"" => 0,
"shanghai" => 0,
"bernoulli" => 1,
_ => {
log::warn!("unexpected fork_name {fork_name_str}, treated as curie");
3
log::warn!("unexpected fork_name {fork_name_str}, treated as bernoulli");
1
}
};
let verified = panic_catch(|| {
if fork_id == 2 {
// before upgrade#3(DA Compression)
if fork_id == 0 {
// before upgrade#2(EIP4844)
verify_evm_calldata(
include_bytes!("plonk_verifier_0.10.3.bin").to_vec(),
include_bytes!("evm_verifier_fork_1.bin").to_vec(),
proof.calldata(),
)
} else {
@@ -191,7 +187,7 @@ pub unsafe extern "C" fn block_traces_to_chunk_info(block_traces: *const c_char)
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces).unwrap();

let witness_block = chunk_trace_to_witness_block(block_traces).unwrap();
let chunk_info = ChunkInfo::from_witness_block(&witness_block, false);
let chunk_info = ChunkHash::from_witness_block(&witness_block, false);

let chunk_info_bytes = serde_json::to_vec(&chunk_info).unwrap();
vec_to_c_char(chunk_info_bytes)

@@ -10,7 +10,7 @@ use prover::{
consts::CHUNK_VK_FILENAME,
utils::init_env_and_log,
zkevm::{Prover, Verifier},
BlockTrace, ChunkProof, ChunkProvingTask,
BlockTrace, ChunkProof,
};
use std::{cell::OnceCell, env, ptr::null};

@@ -71,12 +71,11 @@ pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const
let block_traces = c_char_to_vec(block_traces);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces)
.map_err(|e| format!("failed to deserialize block traces: {e:?}"))?;
let chunk = ChunkProvingTask::from(block_traces);

let proof = PROVER
.get_mut()
.expect("failed to get mutable reference to PROVER.")
.gen_chunk_proof(chunk, None, None, OUTPUT_DIR.as_deref())
.gen_chunk_proof(block_traces, None, None, OUTPUT_DIR.as_deref())
.map_err(|e| format!("failed to generate proof: {e:?}"))?;

serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
common/libzkp/impl/src/evm_verifier_fork_1.bin (BIN, Normal file)
Binary file not shown.
@@ -183,12 +183,6 @@ type ChunkInfo struct {
TxBytes []byte `json:"tx_bytes"`
}

// SubCircuitRowUsage tracing info added in v0.11.0rc8
type SubCircuitRowUsage struct {
Name string `json:"name"`
RowNumber uint64 `json:"row_number"`
}

// ChunkProof includes the proof info that are required for chunk verification and rollup.
type ChunkProof struct {
StorageTrace []byte `json:"storage_trace,omitempty"`
@@ -197,9 +191,8 @@ type ChunkProof struct {
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between cooridinator computation and prover compution
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
GitVersion string `json:"git_version,omitempty"`
RowUsages []SubCircuitRowUsage `json:"row_usages,omitempty"`
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
GitVersion string `json:"git_version,omitempty"`
}

// BatchProof includes the proof info that are required for batch verification and rollup.

@@ -108,7 +108,7 @@ func TestProofDetailHash(t *testing.T) {
}
hash, err := proofDetail.Hash()
assert.NoError(t, err)
expectedHash := "01128ea9006601146ba80dbda959c96ebaefca463e78570e473a57d821db5ec1"
expectedHash := "d3b57cb84b0da8043373eeb3612806fb7248d6d1b6e089846ccf3ccce2d9f31c"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)

var tag = "v4.4.18"
var tag = "v4.4.8"

var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -882,6 +882,17 @@ error ErrorIncorrectPreviousStateRoot()
*Thrown when the previous state root doesn't match stored one.*

### ErrorInvalidBatchHeaderVersion

```solidity
error ErrorInvalidBatchHeaderVersion()
```

*Thrown when the batch header version is invalid.*

### ErrorLastL1MessageSkipped

```solidity

@@ -8,7 +8,7 @@ remappings = [] # a list of remapp
libraries = [] # a list of deployed libraries to link against
cache = true # whether to cache builds or not
force = true # whether to ignore the cache (clean build)
evm_version = 'cancun' # the evm version (by hardfork name)
# evm_version = 'london' # the evm version (by hardfork name)
solc_version = '0.8.24' # override for the solc version (setting this ignores `auto_detect_solc`)
optimizer = true # enable or disable the solc optimizer
optimizer_runs = 200 # the number of optimizer runs
@@ -98,6 +98,19 @@ describe("ScrollChain.blob", async () => {
batchHeader0[25] = 1;
});

it("should revert when ErrorInvalidBatchHeaderVersion", async () => {
const header = new Uint8Array(121);
header[0] = 2;
await expect(chain.commitBatch(1, header, ["0x"], "0x")).to.revertedWithCustomError(
chain,
"ErrorInvalidBatchHeaderVersion"
);
await expect(chain.commitBatch(2, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
chain,
"ErrorInvalidBatchHeaderVersion"
);
});

it("should revert when ErrorNoBlobFound", async () => {
await expect(chain.commitBatch(1, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
chain,
@@ -96,7 +96,7 @@ contract DeployL1BridgeContracts is Script {
address[] memory _verifiers = new address[](1);
_versions[0] = 0;
_verifiers[0] = address(zkEvmVerifierV1);
rollupVerifier = new MultipleVersionRollupVerifier(_versions, _verifiers);
rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);

logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
}
@@ -4,6 +4,7 @@ pragma solidity =0.8.24;

import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";

import {IScrollChain} from "./IScrollChain.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";

@@ -27,9 +28,19 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/// @dev Thrown when the given address is `address(0)`.
error ErrorZeroAddress();

/// @dev Thrown when the given start batch index is finalized.
error ErrorStartBatchIndexFinalized();

/// @dev Thrown when the given start batch index is smaller than `latestVerifier.startBatchIndex`.
error ErrorStartBatchIndexTooSmall();

/*************
* Constants *
*************/

/// @notice The address of ScrollChain contract.
address public immutable scrollChain;

/***********
* Structs *
***********/
@@ -56,7 +67,14 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
* Constructor *
***************/

constructor(uint256[] memory _versions, address[] memory _verifiers) {
constructor(
address _scrollChain,
uint256[] memory _versions,
address[] memory _verifiers
) {
if (_scrollChain == address(0)) revert ErrorZeroAddress();
scrollChain = _scrollChain;

for (uint256 i = 0; i < _versions.length; i++) {
if (_verifiers[i] == address(0)) revert ErrorZeroAddress();
latestVerifier[_versions[i]].verifier = _verifiers[i];
@@ -139,11 +157,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
uint64 _startBatchIndex,
address _verifier
) external onlyOwner {
// We are using version to decide the verifier to use and also this function is
// controlled by 7 days TimeLock. It is hard to predict `lastFinalizedBatchIndex` after 7 days.
// So we decide to remove this check to make verifier updating more easier.
// if (_startBatchIndex <= IScrollChain(scrollChain).lastFinalizedBatchIndex())
// revert ErrorStartBatchIndexFinalized();
if (_startBatchIndex <= IScrollChain(scrollChain).lastFinalizedBatchIndex())
revert ErrorStartBatchIndexFinalized();

Verifier memory _latestVerifier = latestVerifier[_version];
if (_startBatchIndex < _latestVerifier.startBatchIndex) revert ErrorStartBatchIndexTooSmall();
@@ -74,6 +74,9 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @dev Thrown when the previous state root doesn't match stored one.
error ErrorIncorrectPreviousStateRoot();

/// @dev Thrown when the batch header version is invalid.
error ErrorInvalidBatchHeaderVersion();

/// @dev Thrown when the last message is skipped.
error ErrorLastL1MessageSkipped();

@@ -116,8 +119,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {

/// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
/// point evaluation precompile
uint256 private constant BLS_MODULUS =
52435875175126190479447740508185965837690552500527637822603658699938581184513;
uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;

/// @notice The chain id of the corresponding layer 2 chain.
uint64 public immutable layer2ChainId;
@@ -308,10 +310,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
batchPtr,
BatchHeaderV0Codec.BATCH_HEADER_FIXED_LENGTH + _skippedL1MessageBitmap.length
);
} else if (_version >= 1) {
// versions 1 and 2 both use ChunkCodecV1 and BatchHeaderV1Codec,
// but they use different blob encoding and different verifiers.

} else if (_version == 1) {
bytes32 blobVersionedHash;
(blobVersionedHash, _dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1(
_totalL1MessagesPoppedOverall,
@@ -323,7 +322,7 @@
_totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
}
// store entries, the order matters
BatchHeaderV1Codec.storeVersion(batchPtr, _version);
BatchHeaderV1Codec.storeVersion(batchPtr, 1);
BatchHeaderV1Codec.storeBatchIndex(batchPtr, _batchIndex);
BatchHeaderV1Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
BatchHeaderV1Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
@@ -336,6 +335,8 @@
batchPtr,
BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH + _skippedL1MessageBitmap.length
);
} else {
revert ErrorInvalidBatchHeaderVersion();
}

// check the length of bitmap
@@ -710,15 +711,18 @@
version := shr(248, calldataload(_batchHeader.offset))
}

// version should be always 0 or 1 in current code
uint256 _length;
if (version == 0) {
(batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader);
_batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length);
_batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr);
} else if (version >= 1) {
} else if (version == 1) {
(batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader);
_batchHash = BatchHeaderV1Codec.computeBatchHash(batchPtr, _length);
_batchIndex = BatchHeaderV1Codec.getBatchIndex(batchPtr);
} else {
revert ErrorInvalidBatchHeaderVersion();
}
// only check when genesis is imported
if (committedBatches[_batchIndex] != _batchHash && finalizedStateRoots[0] != bytes32(0)) {
@@ -15,22 +15,10 @@ interface IL1GasPriceOracle {
/// @param scalar The current fee scalar updated.
event ScalarUpdated(uint256 scalar);

/// @notice Emitted when current commit fee scalar is updated.
/// @param scalar The current commit fee scalar updated.
event CommitScalarUpdated(uint256 scalar);

/// @notice Emitted when current blob fee scalar is updated.
/// @param scalar The current blob fee scalar updated.
event BlobScalarUpdated(uint256 scalar);

/// @notice Emitted when current l1 base fee is updated.
/// @param l1BaseFee The current l1 base fee updated.
event L1BaseFeeUpdated(uint256 l1BaseFee);

/// @notice Emitted when current l1 blob base fee is updated.
/// @param l1BlobBaseFee The current l1 blob base fee updated.
event L1BlobBaseFeeUpdated(uint256 l1BlobBaseFee);

/*************************
* Public View Functions *
*************************/
@@ -38,24 +26,15 @@ interface IL1GasPriceOracle {
/// @notice Return the current l1 fee overhead.
function overhead() external view returns (uint256);

/// @notice Return the current l1 fee scalar before Curie fork.
/// @notice Return the current l1 fee scalar.
function scalar() external view returns (uint256);

/// @notice Return the current l1 commit fee scalar.
function commitScalar() external view returns (uint256);

/// @notice Return the current l1 blob fee scalar.
function blobScalar() external view returns (uint256);

/// @notice Return the latest known l1 base fee.
function l1BaseFee() external view returns (uint256);

/// @notice Return the latest known l1 blob base fee.
function l1BlobBaseFee() external view returns (uint256);

/// @notice Computes the L1 portion of the fee based on the size of the rlp encoded input
/// transaction, the current L1 base fee, and the various dynamic parameters.
/// @param data Signed fully RLP-encoded transaction to get the L1 fee for.
/// @param data Unsigned fully RLP-encoded transaction to get the L1 fee for.
/// @return L1 fee that should be paid for the tx
function getL1Fee(bytes memory data) external view returns (uint256);

@@ -63,7 +42,7 @@ interface IL1GasPriceOracle {
/// represents the per-transaction gas overhead of posting the transaction and state
/// roots to L1. Adds 74 bytes of padding to account for the fact that the input does
/// not have a signature.
/// @param data Signed fully RLP-encoded transaction to get the L1 gas for.
/// @param data Unsigned fully RLP-encoded transaction to get the L1 gas for.
/// @return Amount of L1 gas used to publish the transaction.
function getL1GasUsed(bytes memory data) external view returns (uint256);

@@ -74,9 +53,4 @@ interface IL1GasPriceOracle {
/// @notice Allows whitelisted caller to modify the l1 base fee.
/// @param _l1BaseFee New l1 base fee.
function setL1BaseFee(uint256 _l1BaseFee) external;

/// @notice Allows whitelisted caller to modify the l1 base fee.
/// @param _l1BaseFee New l1 base fee.
/// @param _l1BlobBaseFee New l1 blob base fee.
function setL1BaseFeeAndBlobBaseFee(uint256 _l1BaseFee, uint256 _l1BlobBaseFee) external;
}
@@ -17,28 +17,6 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
|
||||
/// @param _newWhitelist The address of new whitelist contract.
|
||||
event UpdateWhitelist(address _oldWhitelist, address _newWhitelist);
|
||||
|
||||
/**********
|
||||
* Errors *
|
||||
**********/
|
||||
|
||||
/// @dev Thrown when the blob fee scalar exceed `MAX_BLOB_SCALAR`.
|
||||
error ErrExceedMaxBlobScalar();
|
||||
|
||||
/// @dev Thrown when the commit fee scalar exceed `MAX_COMMIT_SCALAR`.
|
||||
error ErrExceedMaxCommitScalar();
|
||||
|
||||
/// @dev Thrown when the l1 fee overhead exceed `MAX_OVERHEAD`.
|
||||
error ErrExceedMaxOverhead();
|
||||
|
||||
/// @dev Thrown when the l1 fee scalar exceed `MAX_SCALAR`.
|
||||
error ErrExceedMaxScalar();
|
||||
|
||||
/// @dev Thrown when the caller is not whitelisted.
|
||||
error ErrCallerNotWhitelisted();
|
||||
|
||||
/// @dev Thrown when we enable Curie fork after Curie fork.
|
||||
error ErrAlreadyInCurieFork();
|
||||
|
||||
/*************
|
||||
* Constants *
|
||||
*************/
|
||||
@@ -50,25 +28,9 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
|
||||
/// Computed based on current l1 block gas limit.
|
||||
uint256 private constant MAX_OVERHEAD = 30000000 / 16;
|
||||
|
||||
/// @dev The maximum possible l1 fee scale before Curie.
|
||||
/// @dev The maximum possible l1 fee scale.
|
||||
/// x1000 should be enough.
|
||||
uint256 private constant MAX_SCALAR = 1000 * PRECISION;
|
||||
|
||||
/// @dev The maximum possible l1 commit fee scalar after Curie.
|
||||
/// We derive the commit scalar by
|
||||
/// ```
|
||||
/// commit_scalar = commit_gas_per_tx * fluctuation_multiplier * 1e9
|
||||
/// ```
|
||||
/// So, the value should not exceed 10^9 * 1e9 normally.
|
||||
uint256 private constant MAX_COMMIT_SCALAR = 10 ** 9 * PRECISION;
|
||||
|
||||
/// @dev The maximum possible l1 blob fee scalar after Curie.
|
||||
/// We derive the blob scalar by
|
||||
/// ```
|
||||
/// blob_scalar = fluctuation_multiplier / compression_ratio / blob_util_ratio * 1e9
|
||||
/// ```
|
||||
/// So, the value should not exceed 10^9 * 1e9 normally.
|
||||
uint256 private constant MAX_BLOB_SCALAR = 10 ** 9 * PRECISION;
|
||||
uint256 private constant MAX_SCALE = 1000 * PRECISION;
/*************
* Variables *
@@ -86,27 +48,6 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
/// @notice The address of whitelist contract.
IWhitelist public whitelist;

/// @inheritdoc IL1GasPriceOracle
uint256 public override l1BlobBaseFee;

/// @inheritdoc IL1GasPriceOracle
uint256 public override commitScalar;

/// @inheritdoc IL1GasPriceOracle
uint256 public override blobScalar;

/// @notice Indicates whether the network has gone through the Curie upgrade.
bool public isCurie;

/*************
* Modifiers *
*************/

modifier onlyWhitelistedSender() {
if (!whitelist.isSenderAllowed(msg.sender)) revert ErrCallerNotWhitelisted();
_;
}

/***************
* Constructor *
***************/
@@ -121,116 +62,15 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {

/// @inheritdoc IL1GasPriceOracle
function getL1Fee(bytes memory _data) external view override returns (uint256) {
if (isCurie) {
return _getL1FeeCurie(_data);
} else {
return _getL1FeeBeforeCurie(_data);
}
uint256 _l1GasUsed = getL1GasUsed(_data);
uint256 _l1Fee = _l1GasUsed * l1BaseFee;
return (_l1Fee * scalar) / PRECISION;
}

/// @inheritdoc IL1GasPriceOracle
/// @dev The `_data` is the RLP-encoded transaction with signature. We also reserve an additional
/// 4 bytes among the non-zero bytes to store the number of bytes in the RLP-encoded transaction.
function getL1GasUsed(bytes memory _data) public view override returns (uint256) {
if (isCurie) {
// It is near zero since we put all transactions into blobs.
return 0;
} else {
return _getL1GasUsedBeforeCurie(_data);
}
}

/*****************************
* Public Mutating Functions *
*****************************/

/// @inheritdoc IL1GasPriceOracle
function setL1BaseFee(uint256 _l1BaseFee) external override onlyWhitelistedSender {
l1BaseFee = _l1BaseFee;

emit L1BaseFeeUpdated(_l1BaseFee);
}

/// @inheritdoc IL1GasPriceOracle
function setL1BaseFeeAndBlobBaseFee(
uint256 _l1BaseFee,
uint256 _l1BlobBaseFee
) external override onlyWhitelistedSender {
l1BaseFee = _l1BaseFee;
l1BlobBaseFee = _l1BlobBaseFee;

emit L1BaseFeeUpdated(_l1BaseFee);
emit L1BlobBaseFeeUpdated(_l1BlobBaseFee);
}

/************************
* Restricted Functions *
************************/

/// @notice Allows the owner to modify the overhead.
/// @param _overhead New overhead
function setOverhead(uint256 _overhead) external onlyOwner {
if (_overhead > MAX_OVERHEAD) revert ErrExceedMaxOverhead();

overhead = _overhead;
emit OverheadUpdated(_overhead);
}

/// Allows the owner to modify the scalar.
/// @param _scalar New scalar
function setScalar(uint256 _scalar) external onlyOwner {
if (_scalar > MAX_SCALAR) revert ErrExceedMaxScalar();

scalar = _scalar;
emit ScalarUpdated(_scalar);
}

/// Allows the owner to modify the commit scalar.
/// @param _scalar New scalar
function setCommitScalar(uint256 _scalar) external onlyOwner {
if (_scalar > MAX_COMMIT_SCALAR) revert ErrExceedMaxCommitScalar();

commitScalar = _scalar;
emit CommitScalarUpdated(_scalar);
}

/// Allows the owner to modify the blob scalar.
/// @param _scalar New scalar
function setBlobScalar(uint256 _scalar) external onlyOwner {
if (_scalar > MAX_BLOB_SCALAR) revert ErrExceedMaxBlobScalar();

blobScalar = _scalar;
emit BlobScalarUpdated(_scalar);
}

/// @notice Update whitelist contract.
/// @dev This function can only be called by the contract owner.
/// @param _newWhitelist The address of new whitelist contract.
function updateWhitelist(address _newWhitelist) external onlyOwner {
address _oldWhitelist = address(whitelist);

whitelist = IWhitelist(_newWhitelist);
emit UpdateWhitelist(_oldWhitelist, _newWhitelist);
}

/// @notice Enable the Curie fork (callable by contract owner).
///
/// @dev Since this is a predeploy contract, we will directly set the slot during the hard fork
/// to avoid external owner operations.
/// We keep this function for easy unit testing.
function enableCurie() external onlyOwner {
if (isCurie) revert ErrAlreadyInCurieFork();
isCurie = true;
}

/**********************
* Internal Functions *
**********************/

/// @dev Internal function to compute the amount of L1 gas used for a transaction before the Curie fork.
/// The `_data` is the RLP-encoded transaction with signature. We also reserve an additional
/// 4 bytes among the non-zero bytes to store the number of bytes in the RLP-encoded transaction.
/// @param _data Signed fully RLP-encoded transaction to get the L1 gas for.
/// @return Amount of L1 gas used to publish the transaction.
function _getL1GasUsedBeforeCurie(bytes memory _data) private view returns (uint256) {
uint256 _total = 0;
uint256 _length = _data.length;
unchecked {
@@ -245,22 +85,48 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
}
}
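The byte-counting loop itself is cut off by the hunk boundary above. For orientation only, here is a hedged Go sketch of the counting the doc comments describe, assuming the usual 4 gas per zero byte and 16 gas per non-zero byte, plus the configurable `overhead` and the 4 reserved non-zero bytes; the exact constants are an assumption, not a transcription of the elided Solidity:

```go
package main

import "fmt"

// Hedged sketch only: the real Solidity loop is elided by the diff above.
func getL1GasUsedBeforeCurie(data []byte, overhead uint64) uint64 {
	var total uint64
	for _, b := range data {
		if b == 0 {
			total += 4 // assumed cost of a zero calldata byte
		} else {
			total += 16 // assumed cost of a non-zero calldata byte
		}
	}
	// overhead plus the 4 reserved non-zero bytes from the doc comment
	return total + overhead + 4*16
}

func main() {
	tx := []byte{0x01, 0x00, 0x00, 0xff}          // 2 non-zero + 2 zero bytes
	fmt.Println(getL1GasUsedBeforeCurie(tx, 2100)) // 2*16 + 2*4 + 2100 + 64 = 2204
}
```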
/// @dev Internal function to compute the L1 portion of the fee based on the size of the rlp encoded input
/// transaction, the current L1 base fee, and the various dynamic parameters, before the Curie fork.
/// @param _data Signed fully RLP-encoded transaction to get the L1 fee for.
/// @return L1 fee that should be paid for the tx
function _getL1FeeBeforeCurie(bytes memory _data) private view returns (uint256) {
uint256 _l1GasUsed = _getL1GasUsedBeforeCurie(_data);
uint256 _l1Fee = _l1GasUsed * l1BaseFee;
return (_l1Fee * scalar) / PRECISION;
/*****************************
* Public Mutating Functions *
*****************************/

/// @inheritdoc IL1GasPriceOracle
function setL1BaseFee(uint256 _l1BaseFee) external override {
require(whitelist.isSenderAllowed(msg.sender), "Not whitelisted sender");

l1BaseFee = _l1BaseFee;

emit L1BaseFeeUpdated(_l1BaseFee);
}

/// @dev Internal function to compute the L1 portion of the fee based on the size of the rlp encoded input
/// transaction, the current L1 base fee, and the various dynamic parameters, after the Curie fork.
/// @param _data Signed fully RLP-encoded transaction to get the L1 fee for.
/// @return L1 fee that should be paid for the tx
function _getL1FeeCurie(bytes memory _data) private view returns (uint256) {
// We have bounded the values of `commitScalar` and `blobScalar`, so the whole expression won't overflow.
return (commitScalar * l1BaseFee + blobScalar * _data.length * l1BlobBaseFee) / PRECISION;
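Worked numerically, a minimal Go sketch of the expression above (the scalar and fee values are arbitrary examples, not production settings):

```go
package main

import (
	"fmt"
	"math/big"
)

var precision = big.NewInt(1e9)

// l1FeeCurie mirrors (commitScalar * l1BaseFee + blobScalar * len(data) * l1BlobBaseFee) / PRECISION.
func l1FeeCurie(commitScalar, blobScalar, l1BaseFee, l1BlobBaseFee *big.Int, dataLen int64) *big.Int {
	commit := new(big.Int).Mul(commitScalar, l1BaseFee)
	blob := new(big.Int).Mul(blobScalar, big.NewInt(dataLen))
	blob.Mul(blob, l1BlobBaseFee)
	return new(big.Int).Div(commit.Add(commit, blob), precision)
}

func main() {
	// Arbitrary example: 20 gwei base fee, 1 gwei blob base fee, 500-byte tx.
	fee := l1FeeCurie(big.NewInt(230_000_000), big.NewInt(400_000_000),
		big.NewInt(20_000_000_000), big.NewInt(1_000_000_000), 500)
	fmt.Println(fee) // 204600000000 wei: (4.6e18 + 2e20) / 1e9
}
```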
/************************
* Restricted Functions *
************************/

/// @notice Allows the owner to modify the overhead.
/// @param _overhead New overhead
function setOverhead(uint256 _overhead) external onlyOwner {
require(_overhead <= MAX_OVERHEAD, "exceed maximum overhead");

overhead = _overhead;
emit OverheadUpdated(_overhead);
}

/// Allows the owner to modify the scalar.
/// @param _scalar New scalar
function setScalar(uint256 _scalar) external onlyOwner {
require(_scalar <= MAX_SCALE, "exceed maximum scale");

scalar = _scalar;
emit ScalarUpdated(_scalar);
}

/// @notice Update whitelist contract.
/// @dev This function can only be called by the contract owner.
/// @param _newWhitelist The address of new whitelist contract.
function updateWhitelist(address _newWhitelist) external onlyOwner {
address _oldWhitelist = address(whitelist);

whitelist = IWhitelist(_newWhitelist);
emit UpdateWhitelist(_oldWhitelist, _newWhitelist);
}
}
@@ -4,15 +4,14 @@ pragma solidity =0.8.24;

import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";

import {L1BlockContainer} from "../L2/predeploys/L1BlockContainer.sol";
import {L1GasPriceOracle} from "../L2/predeploys/L1GasPriceOracle.sol";
import {Whitelist} from "../L2/predeploys/Whitelist.sol";

contract L1GasPriceOracleTest is DSTestPlus {
uint256 private constant PRECISION = 1e9;
uint256 private constant MAX_OVERHEAD = 30000000 / 16;
uint256 private constant MAX_SCALAR = 1000 * PRECISION;
uint256 private constant MAX_COMMIT_SCALAR = 10 ** 9 * PRECISION;
uint256 private constant MAX_BLOB_SCALAR = 10 ** 9 * PRECISION;
uint256 private constant MAX_SCALE = 1000 * PRECISION;

L1GasPriceOracle private oracle;
Whitelist private whitelist;
@@ -37,7 +36,7 @@ contract L1GasPriceOracleTest is DSTestPlus {
hevm.stopPrank();

// overhead is too large
hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxOverhead.selector);
hevm.expectRevert("exceed maximum overhead");
oracle.setOverhead(MAX_OVERHEAD + 1);

// call by owner, should succeed
@@ -47,7 +46,7 @@ contract L1GasPriceOracleTest is DSTestPlus {
}

function testSetScalar(uint256 _scalar) external {
_scalar = bound(_scalar, 0, MAX_SCALAR);
_scalar = bound(_scalar, 0, MAX_SCALE);

// call by non-owner, should revert
hevm.startPrank(address(1));
@@ -56,8 +55,8 @@ contract L1GasPriceOracleTest is DSTestPlus {
hevm.stopPrank();

// scale is too large
hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxScalar.selector);
oracle.setScalar(MAX_SCALAR + 1);
hevm.expectRevert("exceed maximum scale");
oracle.setScalar(MAX_SCALE + 1);

// call by owner, should succeed
assertEq(oracle.scalar(), 0);
@@ -65,44 +64,6 @@ contract L1GasPriceOracleTest is DSTestPlus {
assertEq(oracle.scalar(), _scalar);
}

function testSetCommitScalar(uint256 _scalar) external {
_scalar = bound(_scalar, 0, MAX_COMMIT_SCALAR);

// call by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert("caller is not the owner");
oracle.setCommitScalar(_scalar);
hevm.stopPrank();

// scale is too large
hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxCommitScalar.selector);
oracle.setCommitScalar(MAX_COMMIT_SCALAR + 1);

// call by owner, should succeed
assertEq(oracle.commitScalar(), 0);
oracle.setCommitScalar(_scalar);
assertEq(oracle.commitScalar(), _scalar);
}

function testSetBlobScalar(uint256 _scalar) external {
_scalar = bound(_scalar, 0, MAX_BLOB_SCALAR);

// call by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert("caller is not the owner");
oracle.setBlobScalar(_scalar);
hevm.stopPrank();

// scale is too large
hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxBlobScalar.selector);
oracle.setBlobScalar(MAX_BLOB_SCALAR + 1);

// call by owner, should succeed
assertEq(oracle.blobScalar(), 0);
oracle.setBlobScalar(_scalar);
assertEq(oracle.blobScalar(), _scalar);
}

function testUpdateWhitelist(address _newWhitelist) external {
hevm.assume(_newWhitelist != address(whitelist));

@@ -118,29 +79,12 @@ contract L1GasPriceOracleTest is DSTestPlus {
assertEq(address(oracle.whitelist()), _newWhitelist);
}

function testEnableCurie() external {
// call by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert("caller is not the owner");
oracle.enableCurie();
hevm.stopPrank();

// call by owner, should succeed
assertBoolEq(oracle.isCurie(), false);
oracle.enableCurie();
assertBoolEq(oracle.isCurie(), true);

// enable twice, should revert
hevm.expectRevert(L1GasPriceOracle.ErrAlreadyInCurieFork.selector);
oracle.enableCurie();
}

function testSetL1BaseFee(uint256 _baseFee) external {
_baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei

// call by non-whitelisted sender, should revert
hevm.startPrank(address(1));
hevm.expectRevert(L1GasPriceOracle.ErrCallerNotWhitelisted.selector);
hevm.expectRevert("Not whitelisted sender");
oracle.setL1BaseFee(_baseFee);
hevm.stopPrank();

@@ -150,25 +94,7 @@ contract L1GasPriceOracleTest is DSTestPlus {
assertEq(oracle.l1BaseFee(), _baseFee);
}

function testSetL1BaseFeeAndBlobBaseFee(uint256 _baseFee, uint256 _blobBaseFee) external {
_baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
_blobBaseFee = bound(_blobBaseFee, 0, 1e9 * 20000); // max 20k gwei

// call by non-whitelisted sender, should revert
hevm.startPrank(address(1));
hevm.expectRevert(L1GasPriceOracle.ErrCallerNotWhitelisted.selector);
oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);
hevm.stopPrank();

// call by owner, should succeed
assertEq(oracle.l1BaseFee(), 0);
assertEq(oracle.l1BlobBaseFee(), 0);
oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);
assertEq(oracle.l1BaseFee(), _baseFee);
assertEq(oracle.l1BlobBaseFee(), _blobBaseFee);
}

function testGetL1GasUsedBeforeCurie(uint256 _overhead, bytes memory _data) external {
function testGetL1GasUsed(uint256 _overhead, bytes memory _data) external {
_overhead = bound(_overhead, 0, MAX_OVERHEAD);

oracle.setOverhead(_overhead);
@@ -182,14 +108,14 @@ contract L1GasPriceOracleTest is DSTestPlus {
assertEq(oracle.getL1GasUsed(_data), _gasUsed);
}

function testGetL1FeeBeforeCurie(
function testGetL1Fee(
uint256 _baseFee,
uint256 _overhead,
uint256 _scalar,
bytes memory _data
) external {
_overhead = bound(_overhead, 0, MAX_OVERHEAD);
_scalar = bound(_scalar, 0, MAX_SCALAR);
_scalar = bound(_scalar, 0, MAX_SCALE);
_baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei

oracle.setOverhead(_overhead);
@@ -204,32 +130,4 @@ contract L1GasPriceOracleTest is DSTestPlus {

assertEq(oracle.getL1Fee(_data), (_gasUsed * _baseFee * _scalar) / PRECISION);
}

function testGetL1GasUsedCurie(bytes memory _data) external {
oracle.enableCurie();
assertEq(oracle.getL1GasUsed(_data), 0);
}

function testGetL1FeeCurie(
uint256 _baseFee,
uint256 _blobBaseFee,
uint256 _commitScalar,
uint256 _blobScalar,
bytes memory _data
) external {
_baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
_blobBaseFee = bound(_blobBaseFee, 0, 1e9 * 20000); // max 20k gwei
_commitScalar = bound(_commitScalar, 0, MAX_COMMIT_SCALAR);
_blobScalar = bound(_blobScalar, 0, MAX_BLOB_SCALAR);

oracle.enableCurie();
oracle.setCommitScalar(_commitScalar);
oracle.setBlobScalar(_blobScalar);
oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);

assertEq(
oracle.getL1Fee(_data),
(_commitScalar * _baseFee + _blobScalar * _blobBaseFee * _data.length) / PRECISION
);
}
}
@@ -18,17 +18,19 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
MockZkEvmVerifier private v0;
MockZkEvmVerifier private v1;
MockZkEvmVerifier private v2;
MockScrollChain private chain;

function setUp() external {
v0 = new MockZkEvmVerifier();
v1 = new MockZkEvmVerifier();
v2 = new MockZkEvmVerifier();

chain = new MockScrollChain(address(1), address(1));
uint256[] memory _versions = new uint256[](1);
address[] memory _verifiers = new address[](1);
_versions[0] = 0;
_verifiers[0] = address(v0);
verifier = new MultipleVersionRollupVerifier(_versions, _verifiers);
verifier = new MultipleVersionRollupVerifier(address(chain), _versions, _verifiers);
}

function testUpdateVerifierVersion0(address _newVerifier) external {
@@ -40,6 +42,10 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
verifier.updateVerifier(0, 0, address(0));
hevm.stopPrank();

// start batch index finalized, revert
hevm.expectRevert(MultipleVersionRollupVerifier.ErrorStartBatchIndexFinalized.selector);
verifier.updateVerifier(0, 0, address(1));

// zero verifier address, revert
hevm.expectRevert(MultipleVersionRollupVerifier.ErrorZeroAddress.selector);
verifier.updateVerifier(0, 1, address(0));
@@ -87,6 +93,10 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
verifier.updateVerifier(version, 0, address(0));
hevm.stopPrank();

// start batch index finalized, revert
hevm.expectRevert(MultipleVersionRollupVerifier.ErrorStartBatchIndexFinalized.selector);
verifier.updateVerifier(version, 0, address(1));

// zero verifier address, revert
hevm.expectRevert(MultipleVersionRollupVerifier.ErrorZeroAddress.selector);
verifier.updateVerifier(version, 1, address(0));

@@ -89,6 +89,12 @@ contract ScrollChainTest is DSTestPlus {
rollup.commitBatch(0, batchHeader0, new bytes[](0), new bytes(0));
hevm.stopPrank();

// invalid version, revert
hevm.startPrank(address(0));
hevm.expectRevert(ScrollChain.ErrorInvalidBatchHeaderVersion.selector);
rollup.commitBatch(2, batchHeader0, new bytes[](1), new bytes(0));
hevm.stopPrank();

// batch header length too small, revert
hevm.startPrank(address(0));
hevm.expectRevert(BatchHeaderV0Codec.ErrorBatchHeaderLengthTooSmall.selector);
@@ -19,6 +19,7 @@ test:
libzkp:
	cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
	rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
	find ../common | grep libzktrie.so | xargs -I{} cp {} ./internal/logic/verifier/lib

coordinator_api: libzkp ## Builds the Coordinator api instance.
	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
@@ -42,7 +42,6 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
vkMap: vkMap,
reverseVkMap: reverseMap(vkMap),
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -65,31 +64,48 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
return bp
}

type chunkIndexRange struct {
start uint64
end uint64
}

func (r *chunkIndexRange) merge(o chunkIndexRange) *chunkIndexRange {
var start, end = r.start, r.end
if o.start < r.start {
start = o.start
// Assign loads and assigns batch tasks
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
if o.end > r.end {
end = o.end

hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("batch assign failure because the hard fork name doesn't exist", "fork name", taskCtx.HardForkName)
return nil, err
}
return &chunkIndexRange{start, end}
}

func (r *chunkIndexRange) contains(start, end uint64) bool {
return r.start <= start && r.end > end
}
// if the hard fork number is set, the rollup relayer must generate the chunk from the hard fork number,
// so the hard fork chunk's start_block_number must be ForkBlockNumber
var startChunkIndex uint64 = 0
var endChunkIndex uint64 = math.MaxInt64
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
return nil, nil
}
startChunkIndex = startChunk.Index
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
// toChunk being nil only indicates that we haven't yet reached the fork boundary,
// so we don't need to change the endChunkIndex from math.MaxInt64
endChunkIndex = toChunk.Index
}
}

type getHardForkNameByBatchFunc func(*orm.Batch) (string, error)

func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCtx *proverTaskContext,
chunkRange *chunkIndexRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByBatchFunc) (*coordinatorType.GetTaskSchema, error) {
startChunkIndex, endChunkIndex := chunkRange.start, chunkRange.end
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
var batchTask *orm.Batch
@@ -137,26 +153,14 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
return nil, nil
}

log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
var (
proverVersion = taskCtx.ProverVersion
hardForkName = taskCtx.HardForkName
)
var err error
if getHardForkName != nil {
hardForkName, err = getHardForkName(batchTask)
if err != nil {
log.Error("failed to get hard fork name by batch", "task_id", batchTask.Hash, "error", err.Error())
return nil, ErrCoordinatorInternalFailure
}
}
log.Info("start batch proof generation session", "id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)

proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: taskCtx.ProverName,
ProverVersion: proverVersion,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// why we need to use UTC time here: see scroll/common/database/db.go
@@ -166,18 +170,18 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}

taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}

bp.batchTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
bp.batchTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
@@ -187,107 +191,6 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
return taskMsg, nil
}

func (bp *BatchProverTask) getChunkRangeByName(ctx *gin.Context, hardForkName string) (*chunkIndexRange, error) {
hardForkNumber, err := bp.getHardForkNumberByName(hardForkName)
if err != nil {
log.Error("batch assign failure because the hard fork name doesn't exist", "fork name", hardForkName)
return nil, err
}

// if the hard fork number is set, the rollup relayer must generate the chunk from the hard fork number,
// so the hard fork chunk's start_block_number must be ForkBlockNumber
var startChunkIndex uint64 = 0
var endChunkIndex uint64 = math.MaxInt64
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", hardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
return nil, nil
}
startChunkIndex = startChunk.Index
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", hardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
// toChunk being nil only indicates that we haven't yet reached the fork boundary,
// so we don't need to change the endChunkIndex from math.MaxInt64
endChunkIndex = toChunk.Index
}
}
return &chunkIndexRange{startChunkIndex, endChunkIndex}, nil
}

func (bp *BatchProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
chunkRange, err := bp.getChunkRangeByName(ctx, taskCtx.HardForkName)
if err != nil {
return nil, err
}
if chunkRange == nil {
return nil, nil
}
return bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, nil)
}

func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
var (
hardForkNames [2]string
chunkRanges [2]*chunkIndexRange
err error
)
for i := 0; i < 2; i++ {
hardForkNames[i] = bp.reverseVkMap[getTaskParameter.VKs[i]]
chunkRanges[i], err = bp.getChunkRangeByName(ctx, hardForkNames[i])
if err != nil {
return nil, err
}
if chunkRanges[i] == nil {
return nil, nil
}
}
chunkRange := chunkRanges[0].merge(*chunkRanges[1])
var hardForkName string
getHardForkName := func(batch *orm.Batch) (string, error) {
for i := 0; i < 2; i++ {
if chunkRanges[i].contains(batch.StartChunkIndex, batch.EndChunkIndex) {
hardForkName = hardForkNames[i]
break
}
}
if hardForkName == "" {
log.Warn("batch does not belong to any hard fork", "batch id", batch.Index)
return "", fmt.Errorf("batch does not belong to any hard fork, batch id: %d", batch.Index)
}
return hardForkName, nil
}
schema, err := bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, getHardForkName)
if schema != nil && err == nil {
schema.HardForkName = hardForkName
return schema, nil
}
return schema, err
}

// Assign loads and assigns batch tasks
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}

if len(getTaskParameter.VKs) > 0 {
return bp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
}
return bp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
}

func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// get chunk from db
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID)
@@ -39,7 +39,6 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
vkMap: vkMap,
reverseVkMap: reverseMap(vkMap),
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -62,11 +61,20 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
return cp
}

type getHardForkNameByChunkFunc func(*orm.Chunk) (string, error)
// Assign the chunk proof which needs to be proven
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}

func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCtx *proverTaskContext,
blockRange *blockRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByChunkFunc) (*coordinatorType.GetTaskSchema, error) {
fromBlockNum, toBlockNum := blockRange.from, blockRange.to
hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("chunk assign failure because the hard fork name doesn't exist", "fork name", taskCtx.HardForkName)
return nil, err
}

fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
if toBlockNum > getTaskParameter.ProverHeight {
toBlockNum = getTaskParameter.ProverHeight + 1
}
@@ -118,26 +126,14 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
return nil, nil
}

log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
var (
proverVersion = taskCtx.ProverVersion
hardForkName = taskCtx.HardForkName
err error
)
if getHardForkName != nil {
hardForkName, err = getHardForkName(chunkTask)
if err != nil {
log.Error("failed to get hard fork name by chunk", "task_id", chunkTask.Hash, "error", err.Error())
return nil, ErrCoordinatorInternalFailure
}
}
log.Info("start chunk generation session", "id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)

proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: taskCtx.ProverName,
ProverVersion: proverVersion,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// why we need to use UTC time here: see scroll/common/database/db.go
@@ -146,18 +142,18 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt

if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}

taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}

cp.chunkTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
@@ -167,95 +163,6 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
return taskMsg, nil
}

func (cp *ChunkProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
blockRange, err := cp.getBlockRangeByName(taskCtx.HardForkName)
if err != nil {
return nil, err
}
return cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, nil)
}

func (cp *ChunkProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
var (
hardForkNames [2]string
blockRanges [2]*blockRange
err error
)
for i := 0; i < 2; i++ {
hardForkNames[i] = cp.reverseVkMap[getTaskParameter.VKs[i]]
blockRanges[i], err = cp.getBlockRangeByName(hardForkNames[i])
if err != nil {
return nil, err
}
}
blockRange, err := blockRanges[0].merge(*blockRanges[1])
if err != nil {
return nil, err
}
var hardForkName string
getHardForkName := func(chunk *orm.Chunk) (string, error) {
for i := 0; i < 2; i++ {
if blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) {
hardForkName = hardForkNames[i]
break
}
}
if hardForkName == "" {
log.Warn("chunk does not belong to any hard fork", "chunk id", chunk.Index)
return "", fmt.Errorf("chunk does not belong to any hard fork, chunk id: %d", chunk.Index)
}
return hardForkName, nil
}
schema, err := cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, getHardForkName)
if schema != nil && err == nil {
schema.HardForkName = hardForkName
return schema, nil
}
return schema, err
}

type blockRange struct {
from uint64
to uint64
}

func (r *blockRange) merge(o blockRange) (*blockRange, error) {
if r.from == o.to {
return &blockRange{o.from, r.to}, nil
} else if r.to == o.from {
return &blockRange{r.from, o.to}, nil
}
return nil, fmt.Errorf("two ranges are not adjacent")
}

func (r *blockRange) contains(start, end uint64) bool {
return r.from <= start && r.to > end
}

func (cp *ChunkProverTask) getBlockRangeByName(hardForkName string) (*blockRange, error) {
hardForkNumber, err := cp.getHardForkNumberByName(hardForkName)
if err != nil {
log.Error("chunk assign failure because the hard fork name doesn't exist", "fork name", hardForkName)
return nil, err
}

fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
return &blockRange{fromBlockNum, toBlockNum}, nil
}
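A small self-contained usage sketch of these range helpers (fork heights invented): two adjacent fork windows merge into one, and `contains` treats `to` as exclusive, matching the `from <= start && to > end` check above:

```go
package main

import (
	"fmt"
	"math"
)

// blockRange mirrors the coordinator type above; fork heights are invented.
type blockRange struct{ from, to uint64 }

func (r *blockRange) merge(o blockRange) (*blockRange, error) {
	if r.from == o.to {
		return &blockRange{o.from, r.to}, nil
	} else if r.to == o.from {
		return &blockRange{r.from, o.to}, nil
	}
	return nil, fmt.Errorf("two ranges are not adjacent")
}

func (r *blockRange) contains(start, end uint64) bool {
	return r.from <= start && r.to > end
}

func main() {
	bernoulli := &blockRange{from: 0, to: 100}        // blocks [0, 100)
	curie := blockRange{from: 100, to: math.MaxInt64} // blocks [100, ...)

	merged, _ := bernoulli.merge(curie)      // adjacent at 100, so this succeeds
	fmt.Println(merged.contains(42, 99))     // true
	fmt.Println(bernoulli.contains(42, 100)) // false: `to` is exclusive
}
```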
// Assign the chunk proof which needs to be proven
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}

if len(getTaskParameter.VKs) > 0 {
return cp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
}
return cp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
}

func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
@@ -29,27 +29,14 @@ type ProverTask interface {
Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
}

func reverseMap(input map[string]string) map[string]string {
output := make(map[string]string, len(input))
for k, v := range input {
if k != "" {
output[v] = k
}
}
return output
}
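A quick usage sketch (vk strings are placeholders): `reverseMap` inverts the fork-name-to-vk map so a prover-supplied vk can be resolved back to its hard fork:

```go
package main

import "fmt"

func reverseMap(input map[string]string) map[string]string {
	output := make(map[string]string, len(input))
	for k, v := range input {
		if k != "" {
			output[v] = k
		}
	}
	return output
}

func main() {
	vkMap := map[string]string{"bernoulli": "vkA", "curie": "vkB"} // placeholder vks
	fmt.Println(reverseMap(vkMap)["vkB"])                          // curie
}
```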
// BaseProverTask is a base prover task which contains a series of helper functions
type BaseProverTask struct {
cfg *config.Config
db *gorm.DB

// key is hardForkName, value is vk
vkMap map[string]string
// key is vk, value is hardForkName
reverseVkMap map[string]string
nameForkMap map[string]uint64
forkHeights []uint64
vkMap map[string]string
nameForkMap map[string]uint64
forkHeights []uint64

batchOrm *orm.Batch
chunkOrm *orm.Chunk
@@ -87,42 +74,30 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
}
ptc.ProverVersion = proverVersion.(string)

hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, fmt.Errorf("get hard fork name from context failed")
}
ptc.HardForkName = hardForkName.(string)

if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
}

// signals that the prover is a multi-circuit version
if len(getTaskParameter.VKs) > 0 {
if len(getTaskParameter.VKs) != 2 {
return nil, fmt.Errorf("parameter vks length must be 2")
}
for _, vk := range getTaskParameter.VKs {
if _, exists := b.reverseVkMap[vk]; !exists {
return nil, fmt.Errorf("incompatible vk. vk %s is invalid", vk)
}
}
} else {
hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, fmt.Errorf("get hard fork name from context failed")
}
ptc.HardForkName = hardForkName.(string)
vk, vkExist := b.vkMap[ptc.HardForkName]
if !vkExist {
return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
}

vk, vkExist := b.vkMap[ptc.HardForkName]
if !vkExist {
return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
}

// if the prover has a different vk
if getTaskParameter.VK != vk {
log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
// if the prover reports a different prover version
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
// if the prover reports the same prover version
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
// if the prover has a different vk
if getTaskParameter.VK != vk {
log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
// if the prover reports a different prover version
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
// if the prover reports the same prover version
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}

isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))

@@ -134,12 +134,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if len(pv) == 0 {
return fmt.Errorf("get ProverVersion from context failed")
}
// use hard_fork_name from the parameter first
// if the prover supports multiple hard forks, the real hard_fork_name is not set in the gin context
hardForkName := proofParameter.HardForkName
if hardForkName == "" {
hardForkName = ctx.GetString(coordinatorType.HardForkName)
}
hardForkName := ctx.GetString(coordinatorType.HardForkName)

var proverTask *orm.ProverTask
var err error

Binary file not shown.
Binary file not shown.
@@ -3,8 +3,8 @@
package verifier

/*
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/
@@ -164,7 +164,9 @@ func (v *Verifier) loadEmbedVK() error {
return err
}

v.BatchVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
return nil
}

@@ -49,7 +49,7 @@ func TestFFI(t *testing.T) {
t.Log("Verified chunk proof 2")

batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof, "curie")
batchOk, err := v.VerifyBatchProof(batchProof, "bernoulli")
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")

@@ -309,7 +309,7 @@ func (o *Batch) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA
db = db.Model(&Batch{})
db = db.Where("hash", hash)
db = db.Where("total_attempts >= ?", maxAttempts)
db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
db = db.Where("proving_status != ?", int(types.ProverProofValid))
if err := db.Update("proving_status", int(types.ProvingTaskFailed)).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, types.ProvingTaskFailed.String())
}

@@ -332,7 +332,7 @@ func (o *Chunk) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
db = db.Where("total_attempts >= ?", maxAttempts)
db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
db = db.Where("proving_status != ?", int(types.ProverProofValid))
if err := db.Update("proving_status", int(types.ProvingTaskFailed)).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatus error: %w, chunk hash: %v, status: %v", err, hash, types.ProvingTaskFailed.String())
}

@@ -2,17 +2,15 @@ package types

// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
VK string `form:"vk" json:"vk"` // will be deprecated after all go_provers are offline
VKs []string `form:"vks" json:"vks"` // for rust_provers that support multi-circuits
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
VK string `form:"vk" json:"vk"`
}

// GetTaskSchema is the schema data returned to the prover for a get-task request
type GetTaskSchema struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
TaskData string `json:"task_data"`
HardForkName string `json:"hard_fork_name"`
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
TaskData string `json:"task_data"`
}
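For the multi-circuit variant shown above (the `vks` field being removed in this diff), `checkParameter` requires exactly two verifying keys. A hedged sketch of a request value, with placeholder vk strings and an assumed task-type value:

```go
package main

import "fmt"

// Trimmed copy of the multi-circuit GetTaskParameter above.
type GetTaskParameter struct {
	ProverHeight uint64   `form:"prover_height" json:"prover_height"`
	TaskType     int      `form:"task_type" json:"task_type"`
	VK           string   `form:"vk" json:"vk"`
	VKs          []string `form:"vks" json:"vks"`
}

func main() {
	p := GetTaskParameter{
		ProverHeight: 1234567,
		TaskType:     1,                                     // e.g. a chunk task; the enum value is an assumption
		VKs:          []string{"vkBernoulli", "vkCurie"},    // must be exactly 2
	}
	fmt.Println(len(p.VKs) == 2) // true
}
```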
@@ -3,12 +3,11 @@ package types
// SubmitProofParameter is the SubmitProof API request parameter
type SubmitProofParameter struct {
// TODO: once provers have upgraded, change this field to required
UUID string `form:"uuid" json:"uuid"`
TaskID string `form:"task_id" json:"task_id" binding:"required"`
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
Proof string `form:"proof" json:"proof"`
FailureType int `form:"failure_type" json:"failure_type"`
FailureMsg string `form:"failure_msg" json:"failure_msg"`
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
UUID string `form:"uuid" json:"uuid"`
TaskID string `form:"task_id" json:"task_id" binding:"required"`
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
Proof string `form:"proof" json:"proof"`
FailureType int `form:"failure_type" json:"failure_type"`
FailureMsg string `form:"failure_msg" json:"failure_msg"`
}

74 database/script/abi.json Normal file
@@ -0,0 +1,74 @@
[
  {
    "anonymous": false,
    "inputs": [
      {
        "indexed": false,
        "internalType": "uint256",
        "name": "basefee",
        "type": "uint256"
      }
    ],
    "name": "BaseFeeSuccess",
    "type": "event"
  },
  {
    "anonymous": false,
    "inputs": [
      {
        "indexed": false,
        "internalType": "bytes32",
        "name": "data",
        "type": "bytes32"
      }
    ],
    "name": "McopySuccess",
    "type": "event"
  },
  {
    "anonymous": false,
    "inputs": [
      {
        "indexed": false,
        "internalType": "uint256",
        "name": "value",
        "type": "uint256"
      }
    ],
    "name": "TloadSuccess",
    "type": "event"
  },
  {
    "anonymous": false,
    "inputs": [],
    "name": "TstoreSuccess",
    "type": "event"
  },
  {
    "inputs": [],
    "name": "useBaseFee",
    "outputs": [],
    "stateMutability": "nonpayable",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "useMcopy",
    "outputs": [],
    "stateMutability": "nonpayable",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "uint256",
        "name": "newValue",
        "type": "uint256"
      }
    ],
    "name": "useTloadTstore",
    "outputs": [],
    "stateMutability": "nonpayable",
    "type": "function"
  }
]
221 database/script/send_transactions.go Normal file
@@ -0,0 +1,221 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
)
|
||||
|
||||
func main() {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(os.Getenv("L2_DEPLOYER_PRIVATE_KEY"), "0x"))
|
||||
if err != nil {
|
||||
log.Crit("failed to create private key", "err", err)
|
||||
}
|
||||
publicKey := privateKey.Public()
|
||||
publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
|
||||
if !ok {
|
||||
log.Crit("failed to cast public key to ECDSA")
|
||||
}
|
||||
fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)
|
||||
|
||||
client, err := ethclient.Dial(os.Getenv("SCROLL_L2_DEPLOYMENT_RPC"))
|
||||
if err != nil {
|
||||
log.Crit("failed to connect to network", "err", err)
|
||||
}
|
||||
|
||||
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, new(big.Int).SetUint64(222222))
|
||||
if err != nil {
|
||||
log.Crit("failed to initialize keyed transactor with chain ID", "err", err)
|
||||
}
|
||||
|
||||
abiJSON, err := os.ReadFile("abi.json")
|
||||
if err != nil {
|
||||
log.Crit("failed to read ABI file", "err", err)
|
||||
}
|
||||
|
||||
l2TestCurieOpcodesMetaData := &bind.MetaData{ABI: string(abiJSON)}
|
||||
l2TestCurieOpcodesAbi, err := l2TestCurieOpcodesMetaData.GetAbi()
|
||||
if err != nil {
|
||||
log.Crit("failed to get abi", "err", err)
|
||||
}
|
||||
|
||||
nonce, err := client.PendingNonceAt(context.Background(), fromAddress)
|
||||
if err != nil {
|
||||
log.Crit("failed to get pending nonce", "err", err)
|
||||
}
|
||||
|
||||
useTloadTstoreCalldata, err := l2TestCurieOpcodesAbi.Pack("useTloadTstore", new(big.Int).SetUint64(9876543210))
|
||||
if err != nil {
|
||||
log.Crit("failed to pack useTloadTstore calldata", "err", err)
|
||||
}
|
||||
|
||||
useMcopyCalldata, err := l2TestCurieOpcodesAbi.Pack("useMcopy")
|
||||
if err != nil {
|
||||
log.Crit("failed to pack useMcopy calldata", "err", err)
|
||||
}
|
||||
|
||||
useBaseFee, err := l2TestCurieOpcodesAbi.Pack("useBaseFee")
|
||||
if err != nil {
|
||||
log.Crit("failed to pack useBaseFee calldata", "err", err)
|
||||
}
|
||||
|
||||
l2TestCurieOpcodesAddr := common.HexToAddress(os.Getenv("L2_TEST_CURIE_OPCODES_ADDR"))
|
||||
|
||||
txTypes := []int{
|
||||
LegacyTxType,
|
||||
AccessListTxType,
|
||||
DynamicFeeTxType,
|
||||
}
|
||||
|
||||
accessLists := []types.AccessList{
|
||||
nil,
|
||||
{
|
||||
{Address: common.HexToAddress("0x0000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
|
||||
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
|
||||
}},
|
||||
},
|
||||
{
|
||||
{Address: common.HexToAddress("0x1000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
|
||||
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001")}},
|
||||
},
|
||||
{
|
||||
{Address: common.HexToAddress("0x2000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
|
||||
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"),
|
||||
}},
|
||||
{Address: common.HexToAddress("0x3000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
|
||||
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"),
|
||||
}},
|
||||
},
|
||||
{
|
||||
{Address: common.HexToAddress("0x4000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
|
||||
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"),
|
||||
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // repetitive storage key
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
for i := 0; i < 50; i++ {
|
||||
for _, txType := range txTypes {
|
||||
for _, accessList := range accessLists {
|
||||
if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useTloadTstoreCalldata); err != nil {
|
||||
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
|
||||
}
|
||||
nonce += 1
|
||||
|
||||
				if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useMcopyCalldata); err != nil {
					log.Crit("failed to send transaction", "nonce", nonce, "err", err)
				}
				nonce += 1

				if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useBaseFee); err != nil {
					log.Crit("failed to send transaction", "nonce", nonce, "err", err)
				}
				nonce += 1

				if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, nil, []byte{0x01, 0x02, 0x03, 0x04}); err != nil {
					log.Crit("failed to send transaction", "nonce", nonce, "err", err)
				}
				nonce += 1

				if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, new(big.Int).SetUint64(1), []byte{0x01, 0x02, 0x03, 0x04}); err != nil {
					log.Crit("failed to send transaction", "nonce", nonce, "err", err)
				}
				nonce += 1

				if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, new(big.Int).SetUint64(1), nil); err != nil {
					log.Crit("failed to send transaction", "nonce", nonce, "err", err)
				}
				nonce += 1
			}
		}
	}
}

const (
	LegacyTxType     = 1
	AccessListTxType = 2
	DynamicFeeTxType = 3
)

func sendTransaction(client *ethclient.Client, auth *bind.TransactOpts, txType int, to *common.Address, nonce uint64, accessList types.AccessList, value *big.Int, data []byte) error {
	var txData types.TxData
	switch txType {
	case LegacyTxType:
		txData = &types.LegacyTx{
			Nonce:    nonce,
			GasPrice: new(big.Int).SetUint64(1000000000),
			Gas:      300000,
			To:       to,
			Value:    value,
			Data:     data,
		}
	case AccessListTxType:
		txData = &types.AccessListTx{
			ChainID:    new(big.Int).SetUint64(222222),
			Nonce:      nonce,
			GasPrice:   new(big.Int).SetUint64(1000000000),
			Gas:        300000,
			To:         to,
			Value:      value,
			Data:       data,
			AccessList: accessList,
		}
	case DynamicFeeTxType:
		txData = &types.DynamicFeeTx{
			ChainID:    new(big.Int).SetUint64(222222),
			Nonce:      nonce,
			GasTipCap:  new(big.Int).SetUint64(1000000000),
			GasFeeCap:  new(big.Int).SetUint64(1000000000),
			Gas:        300000,
			To:         to,
			Value:      value,
			Data:       data,
			AccessList: accessList,
		}
	default:
		return fmt.Errorf("invalid transaction type: %d", txType)
	}

	signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
	if err != nil {
		return fmt.Errorf("failed to sign tx: %w", err)
	}

	if err = client.SendTransaction(context.Background(), signedTx); err != nil {
		return fmt.Errorf("failed to send tx: %w", err)
	}

	log.Info("transaction sent", "txHash", signedTx.Hash().Hex())

	var receipt *types.Receipt
	for {
		receipt, err = client.TransactionReceipt(context.Background(), signedTx.Hash())
		if err == nil {
			if receipt.Status != types.ReceiptStatusSuccessful {
				return fmt.Errorf("transaction failed: %s", signedTx.Hash().Hex())
			}
			break
		}
		log.Warn("waiting for receipt", "txHash", signedTx.Hash())
		time.Sleep(2 * time.Second)
	}

	log.Info("Sent transaction", "txHash", signedTx.Hash().Hex(), "from", auth.From.Hex(), "nonce", signedTx.Nonce(), "to", to.Hex())
	return nil
}
1 go.work
@@ -5,6 +5,7 @@ use (
	./common
	./coordinator
	./database
	./prover
	./rollup
	./tests/integration-test
)
@@ -137,6 +137,7 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935
|
||||
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
|
||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
|
||||
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
|
||||
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
@@ -261,6 +262,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
|
||||
github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
|
||||
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
|
||||
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
@@ -319,6 +321,7 @@ github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1C
|
||||
github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
|
||||
github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
|
||||
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
|
||||
@@ -378,6 +381,8 @@ github.com/sclevine/agouti v3.0.0+incompatible h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGu
|
||||
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce h1:SN43TBs7VaJt9q737eWWqGz0OCg4v+PtUn3RbJcG1o0=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703 h1:dcvPPyyfe3SocOBwgww3e1wcWjgF85kKDsohY4TXII0=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
|
||||
@@ -449,7 +454,6 @@ github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgr
|
||||
github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
|
||||
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
|
||||
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
|
||||
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
|
||||
go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4=
|
||||
@@ -514,6 +518,7 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11
|
||||
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
|
||||
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
|
||||
14 prover/.gitignore vendored Normal file
@@ -0,0 +1,14 @@
.idea
stack/test_stack
mock/stack
build/bin/

# ignore db file
bbolt_db

core/lib

params/
seed

keystore
5680 prover/Cargo.lock generated
File diff suppressed because it is too large
@@ -1,47 +0,0 @@
|
||||
[package]
|
||||
name = "prover"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
|
||||
[patch.crates-io]
|
||||
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
|
||||
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
|
||||
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
|
||||
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
|
||||
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
|
||||
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
|
||||
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
|
||||
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
|
||||
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0"
|
||||
log = "0.4"
|
||||
env_logger = "0.11.3"
|
||||
serde = { version = "1.0.198", features = ["derive"] }
|
||||
serde_json = "1.0.116"
|
||||
futures = "0.3.30"
|
||||
|
||||
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
|
||||
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
|
||||
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
|
||||
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
|
||||
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", branch = "v0.10", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
|
||||
prover_next = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.1", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
|
||||
base64 = "0.13.1"
|
||||
reqwest = { version = "0.12.4", features = ["gzip"] }
|
||||
reqwest-middleware = "0.3"
|
||||
reqwest-retry = "0.5"
|
||||
once_cell = "1.19.0"
|
||||
hex = "0.4.3"
|
||||
tiny-keccak = { version = "2.0.0", features = ["sha3", "keccak"] }
|
||||
rand = "0.8.5"
|
||||
eth-keystore = "0.5.0"
|
||||
rlp = "0.5.2"
|
||||
tokio = "1.37.0"
|
||||
sled = "0.34.7"
|
||||
http = "1.1.0"
|
||||
clap = { version = "4.5", features = ["derive"] }
|
||||
@@ -1,41 +1,49 @@
|
||||
.PHONY: prover
|
||||
.PHONY: lint docker clean prover mock-prover
|
||||
|
||||
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
|
||||
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
else
|
||||
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
endif
|
||||
|
||||
ZKEVM_VERSION=$(shell ./print_high_zkevm_version.sh)
|
||||
ifeq (${ZKEVM_VERSION},)
|
||||
$(error ZKEVM_VERSION not set)
|
||||
else
|
||||
$(info ZKEVM_VERSION is ${ZKEVM_VERSION})
|
||||
endif
|
||||
|
||||
ZKEVM_COMMIT=$(shell echo ${ZKEVM_VERSION} | cut -d " " -f2)
|
||||
$(info ZKEVM_COMMIT is ${ZKEVM_COMMIT})
|
||||
|
||||
HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')
|
||||
|
||||
GIT_REV=$(shell git rev-parse --short HEAD)
|
||||
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)
|
||||
|
||||
ifeq (${GO_TAG},)
|
||||
$(error GO_TAG not set)
|
||||
else
|
||||
$(info GO_TAG is ${GO_TAG})
|
||||
endif
|
||||
|
||||
ifeq (${HALO2_GPU_VERSION},)
|
||||
# use halo2_proofs with CPU
|
||||
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_VERSION}
|
||||
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
|
||||
else
|
||||
# use halo2_gpu
|
||||
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_GPU_VERSION}
|
||||
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_GPU_VERSION}
|
||||
endif
|
||||
|
||||
prover:
|
||||
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release
|
||||
rm -rf ./lib && mkdir ./lib
|
||||
find target/ -name "libzktrie.so" | xargs -I{} cp {} ./lib
|
||||
libzkp:
|
||||
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
|
||||
rm -rf ./core/lib && cp -r ../common/libzkp/interface ./core/lib
|
||||
find ../common | grep libzktrie.so | xargs -I{} cp {} ./core/lib/
|
||||
|
||||
prover: libzkp ## Build the Prover instance.
|
||||
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/prover ./cmd
|
||||
|
||||
mock-prover: ## Build the mocked Prover instance.
|
||||
GOBIN=$(PWD)/build/bin go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/prover ./cmd
|
||||
|
||||
gpu-prover: libzkp ## Build the GPU Prover instance.
|
||||
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags gpu -o $(PWD)/build/bin/prover ./cmd
|
||||
|
||||
test-prover: libzkp
|
||||
go test -tags ffi -timeout 0 -v ./prover
|
||||
|
||||
test-gpu-prover: libzkp
|
||||
go test -tags="gpu ffi" -timeout 0 -v ./prover
|
||||
|
||||
lastest-zk-version:
|
||||
curl -sL https://api.github.com/repos/scroll-tech/zkevm-circuits/commits | jq -r ".[0].sha"
|
||||
|
||||
lint: ## Lint the files - used for CI
|
||||
cp -r ../common/libzkp/interface ./core/lib
|
||||
GOBIN=$(PWD)/build/bin go run ../build/lint.go
|
||||
|
||||
clean: ## Empty out the bin folder
|
||||
@rm -rf build/bin
|
||||
|
||||
58 prover/README.md Normal file
@@ -0,0 +1,58 @@
# Prover

This directory contains the Scroll Prover module.


## Build
```bash
make clean
make prover
```
The built prover binary is in the build/bin directory.


## Test

Make sure to lint before testing (or committing):

```bash
make lint
```

For current unit tests, run:

```bash
make prover
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib
export CHAIN_ID=534353 # for Scroll Alpha
go test -v ./...
```

When you need to mock prover results and run other prover tests (using [`core/mock.go`](core/mock.go) instead of [`core/prover.go`](core/prover.go)), run:

```bash
go test -tags="mock_prover" -v -race -covermode=atomic scroll-tech/prover/...
```


## Configure

The prover behavior can be configured using [`config.json`](config.json). Check the code comments of `Config` and `ProverCoreConfig` in [`config/config.go`](config/config.go) for more details.
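For orientation, here is a minimal sketch of such a config file. It uses only the fields declared on `Config`, `ProverCoreConfig`, `CoordinatorConfig` and `L2GethConfig` in [`config/config.go`](config/config.go); the values are illustrative placeholders rather than recommended defaults (`proof_type` 1 selects the chunk prover, 2 the batch prover):

```json
{
  "prover_name": "prover-1",
  "hard_fork_name": "curie",
  "keystore_path": "keystore.json",
  "keystore_password": "prover-pwd",
  "db_path": "bbolt_db",
  "core": {
    "params_path": "params",
    "assets_path": "assets",
    "proof_type": 1
  },
  "coordinator": {
    "base_url": "http://localhost:8555",
    "retry_count": 10,
    "retry_wait_time_sec": 10,
    "connection_timeout_sec": 30
  },
  "l2geth": {
    "endpoint": "http://localhost:9999",
    "confirmations": "0x1"
  }
}
```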
## Start

1. Set environment variables:

```bash
export CHAIN_ID=534353 # change to correct chain ID
export RUST_MIN_STACK=100000000
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib
```

2. Start the module using settings from config.json:

```bash
./build/bin/prover
```
209
prover/client/client.go
Normal file
209
prover/client/client.go
Normal file
@@ -0,0 +1,209 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-resty/resty/v2"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/prover/config"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/version"
|
||||
)
|
||||
|
||||
// CoordinatorClient is a client used for interacting with the Coordinator service.
|
||||
type CoordinatorClient struct {
|
||||
client *resty.Client
|
||||
|
||||
proverName string
|
||||
hardForkName string
|
||||
priv *ecdsa.PrivateKey
|
||||
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// NewCoordinatorClient constructs a new CoordinatorClient.
|
||||
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, hardForkName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
|
||||
client := resty.New().
|
||||
SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second).
|
||||
SetRetryCount(cfg.RetryCount).
|
||||
SetRetryWaitTime(time.Duration(cfg.RetryWaitTimeSec) * time.Second).
|
||||
SetBaseURL(cfg.BaseURL).
|
||||
AddRetryAfterErrorCondition().
|
||||
AddRetryCondition(func(response *resty.Response, err error) bool {
|
||||
if err != nil {
|
||||
log.Warn("Encountered an error while sending the request. Retrying...", "error", err)
|
||||
return true
|
||||
}
|
||||
return response.IsError()
|
||||
})
|
||||
|
||||
log.Info("successfully initialized prover client",
|
||||
"base url", cfg.BaseURL,
|
||||
"connection timeout (second)", cfg.ConnectionTimeoutSec,
|
||||
"retry count", cfg.RetryCount,
|
||||
"retry wait time (second)", cfg.RetryWaitTimeSec)
|
||||
|
||||
return &CoordinatorClient{
|
||||
client: client,
|
||||
proverName: proverName,
|
||||
hardForkName: hardForkName,
|
||||
priv: priv,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Login completes the entire login process in one function call.
|
||||
func (c *CoordinatorClient) Login(ctx context.Context) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
var challengeResult ChallengeResponse
|
||||
|
||||
// Get random string
|
||||
challengeResp, err := c.client.R().
|
||||
SetHeader("Content-Type", "application/json").
|
||||
SetResult(&challengeResult).
|
||||
Get("/coordinator/v1/challenge")
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("get random string failed: %w", err)
|
||||
}
|
||||
|
||||
if challengeResp.StatusCode() != 200 {
|
||||
return fmt.Errorf("failed to get random string, status code: %v", challengeResp.StatusCode())
|
||||
}
|
||||
|
||||
// Prepare and sign the login request
|
||||
authMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
ProverVersion: version.Version,
|
||||
ProverName: c.proverName,
|
||||
Challenge: challengeResult.Data.Token,
|
||||
HardForkName: c.hardForkName,
|
||||
},
|
||||
}
|
||||
|
||||
err = authMsg.SignWithKey(c.priv)
|
||||
if err != nil {
|
||||
return fmt.Errorf("signature failed: %w", err)
|
||||
}
|
||||
|
||||
// Login to coordinator
|
||||
loginReq := &LoginRequest{
|
||||
Message: struct {
|
||||
Challenge string `json:"challenge"`
|
||||
ProverName string `json:"prover_name"`
|
||||
ProverVersion string `json:"prover_version"`
|
||||
HardForkName string `json:"hard_fork_name"`
|
||||
}{
|
||||
Challenge: authMsg.Identity.Challenge,
|
||||
ProverName: authMsg.Identity.ProverName,
|
||||
ProverVersion: authMsg.Identity.ProverVersion,
|
||||
HardForkName: authMsg.Identity.HardForkName,
|
||||
},
|
||||
Signature: authMsg.Signature,
|
||||
}
|
||||
|
||||
// store JWT token for login requests
|
||||
c.client.SetAuthToken(challengeResult.Data.Token)
|
||||
|
||||
var loginResult LoginResponse
|
||||
loginResp, err := c.client.R().
|
||||
SetHeader("Content-Type", "application/json").
|
||||
SetBody(loginReq).
|
||||
SetResult(&loginResult).
|
||||
Post("/coordinator/v1/login")
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("login failed: %w", err)
|
||||
}
|
||||
|
||||
if loginResp.StatusCode() != 200 {
|
||||
return fmt.Errorf("failed to login, status code: %v", loginResp.StatusCode())
|
||||
}
|
||||
|
||||
if loginResult.ErrCode != types.Success {
|
||||
return fmt.Errorf("failed to login, error code: %v, error message: %v", loginResult.ErrCode, loginResult.ErrMsg)
|
||||
}
|
||||
|
||||
// store JWT token for future requests
|
||||
c.client.SetAuthToken(loginResult.Data.Token)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTask sends a request to the coordinator to get prover task.
|
||||
func (c *CoordinatorClient) GetTask(ctx context.Context, req *GetTaskRequest) (*GetTaskResponse, error) {
|
||||
var result GetTaskResponse
|
||||
|
||||
resp, err := c.client.R().
|
||||
SetHeader("Content-Type", "application/json").
|
||||
SetBody(req).
|
||||
SetResult(&result).
|
||||
Post("/coordinator/v1/get_task")
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("request for GetTask failed: %w", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode() != 200 {
|
||||
return nil, fmt.Errorf("failed to get task, status code: %v", resp.StatusCode())
|
||||
}
|
||||
|
||||
if result.ErrCode == types.ErrJWTTokenExpired {
|
||||
log.Info("JWT expired, attempting to re-login")
|
||||
if err := c.Login(ctx); err != nil {
|
||||
return nil, fmt.Errorf("JWT expired, re-login failed: %w", err)
|
||||
}
|
||||
log.Info("re-login success")
|
||||
return c.GetTask(ctx, req)
|
||||
}
|
||||
if result.ErrCode != types.Success {
|
||||
return nil, fmt.Errorf("error code: %v, error message: %v", result.ErrCode, result.ErrMsg)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// SubmitProof sends a request to the coordinator to submit proof.
|
||||
func (c *CoordinatorClient) SubmitProof(ctx context.Context, req *SubmitProofRequest) error {
|
||||
var result SubmitProofResponse
|
||||
|
||||
resp, err := c.client.R().
|
||||
SetHeader("Content-Type", "application/json").
|
||||
SetBody(req).
|
||||
SetResult(&result).
|
||||
Post("/coordinator/v1/submit_proof")
|
||||
|
||||
if err != nil {
|
||||
log.Error("submit proof request failed", "error", err)
|
||||
return fmt.Errorf("submit proof request failed: %w", ErrCoordinatorConnect)
|
||||
}
|
||||
|
||||
if resp.StatusCode() != 200 {
|
||||
log.Error("failed to submit proof", "status code", resp.StatusCode())
|
||||
return fmt.Errorf("failed to submit proof, status code not 200: %w", ErrCoordinatorConnect)
|
||||
}
|
||||
|
||||
if result.ErrCode == types.ErrJWTTokenExpired {
|
||||
log.Info("JWT expired, attempting to re-login")
|
||||
if err := c.Login(ctx); err != nil {
|
||||
log.Error("JWT expired, re-login failed", "error", err)
|
||||
return fmt.Errorf("JWT expired, re-login failed: %w", ErrCoordinatorConnect)
|
||||
}
|
||||
log.Info("re-login success")
|
||||
return c.SubmitProof(ctx, req)
|
||||
}
|
||||
|
||||
if result.ErrCode != types.Success {
|
||||
return fmt.Errorf("error code: %v, error message: %v", result.ErrCode, result.ErrMsg)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
77
prover/client/types.go
Normal file
77
prover/client/types.go
Normal file
@@ -0,0 +1,77 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
)
|
||||
|
||||
// ErrCoordinatorConnect connect to coordinator error
|
||||
var ErrCoordinatorConnect = errors.New("connect coordinator error")
|
||||
|
||||
// ChallengeResponse defines the response structure for random API
|
||||
type ChallengeResponse struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data *struct {
|
||||
Time string `json:"time"`
|
||||
Token string `json:"token"`
|
||||
} `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
// LoginRequest defines the request structure for login API
|
||||
type LoginRequest struct {
|
||||
Message struct {
|
||||
Challenge string `json:"challenge"`
|
||||
ProverName string `json:"prover_name"`
|
||||
ProverVersion string `json:"prover_version"`
|
||||
HardForkName string `json:"hard_fork_name"`
|
||||
} `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
// LoginResponse defines the response structure for login API
|
||||
type LoginResponse struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data *struct {
|
||||
Time string `json:"time"`
|
||||
Token string `json:"token"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// GetTaskRequest defines the request structure for GetTask API
|
||||
type GetTaskRequest struct {
|
||||
TaskType message.ProofType `json:"task_type"`
|
||||
ProverHeight uint64 `json:"prover_height,omitempty"`
|
||||
VK string `json:"vk"`
|
||||
}
|
||||
|
||||
// GetTaskResponse defines the response structure for GetTask API
|
||||
type GetTaskResponse struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data *struct {
|
||||
UUID string `json:"uuid"`
|
||||
TaskID string `json:"task_id"`
|
||||
TaskType int `json:"task_type"`
|
||||
TaskData string `json:"task_data"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// SubmitProofRequest defines the request structure for the SubmitProof API.
|
||||
type SubmitProofRequest struct {
|
||||
UUID string `json:"uuid"`
|
||||
TaskID string `json:"task_id"`
|
||||
TaskType int `json:"task_type"`
|
||||
Status int `json:"status"`
|
||||
Proof string `json:"proof"`
|
||||
FailureType int `json:"failure_type,omitempty"`
|
||||
FailureMsg string `json:"failure_msg,omitempty"`
|
||||
}
|
||||
|
||||
// SubmitProofResponse defines the response structure for the SubmitProof API.
|
||||
type SubmitProofResponse struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
}
|
||||
77
prover/cmd/app/app.go
Normal file
77
prover/cmd/app/app.go
Normal file
@@ -0,0 +1,77 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"scroll-tech/prover"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
"scroll-tech/common/version"
|
||||
|
||||
"scroll-tech/prover/config"
|
||||
)
|
||||
|
||||
var app *cli.App
|
||||
|
||||
func init() {
|
||||
app = cli.NewApp()
|
||||
app.Action = action
|
||||
app.Name = "prover"
|
||||
app.Usage = "The Scroll L2 Prover"
|
||||
app.Version = version.Version
|
||||
app.Flags = append(app.Flags, utils.CommonFlags...)
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
return utils.LogSetup(ctx)
|
||||
}
|
||||
|
||||
// Register `prover-test` app for integration-test.
|
||||
utils.RegisterSimulation(app, utils.ChunkProverApp)
|
||||
utils.RegisterSimulation(app, utils.BatchProverApp)
|
||||
}
|
||||
|
||||
func action(ctx *cli.Context) error {
|
||||
// Load config file.
|
||||
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
|
||||
cfg, err := config.NewConfig(cfgFile)
|
||||
if err != nil {
|
||||
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
// Create prover
|
||||
r, err := prover.NewProver(context.Background(), cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Start prover.
|
||||
r.Start()
|
||||
|
||||
defer r.Stop()
|
||||
log.Info(
|
||||
"prover start successfully",
|
||||
"name", cfg.ProverName, "type", cfg.Core.ProofType,
|
||||
"publickey", r.PublicKey(), "version", version.Version,
|
||||
)
|
||||
|
||||
// Catch CTRL-C to ensure a graceful shutdown.
|
||||
interrupt := make(chan os.Signal, 1)
|
||||
signal.Notify(interrupt, os.Interrupt)
|
||||
|
||||
// Wait until the interrupt signal is received from an OS signal.
|
||||
<-interrupt
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run the prover cmd func.
|
||||
func Run() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
_, _ = fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
29
prover/cmd/app/app_test.go
Normal file
29
prover/cmd/app/app_test.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/utils"
|
||||
"scroll-tech/common/version"
|
||||
)
|
||||
|
||||
func TestRunChunkProver(t *testing.T) {
|
||||
prover := cmd.NewCmd(string(utils.ChunkProverApp), "--version")
|
||||
defer prover.WaitExit()
|
||||
|
||||
// wait result
|
||||
prover.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("prover version %s", version.Version))
|
||||
prover.RunApp(nil)
|
||||
}
|
||||
|
||||
func TestRunBatchProver(t *testing.T) {
|
||||
prover := cmd.NewCmd(string(utils.BatchProverApp), "--version")
|
||||
defer prover.WaitExit()
|
||||
|
||||
// wait result
|
||||
prover.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("prover version %s", version.Version))
|
||||
prover.RunApp(nil)
|
||||
}
|
||||
128
prover/cmd/app/mock_app.go
Normal file
128
prover/cmd/app/mock_app.go
Normal file
@@ -0,0 +1,128 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/prover/config"
|
||||
)
|
||||
|
||||
var (
|
||||
proverIndex int
|
||||
)
|
||||
|
||||
func getIndex() int {
|
||||
defer func() { proverIndex++ }()
|
||||
return proverIndex
|
||||
}
|
||||
|
||||
// ProverApp prover-test client manager.
|
||||
type ProverApp struct {
|
||||
Config *config.Config
|
||||
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
|
||||
originFile string
|
||||
proverFile string
|
||||
bboltDB string
|
||||
|
||||
index int
|
||||
name string
|
||||
args []string
|
||||
*cmd.Cmd
|
||||
}
|
||||
|
||||
// NewProverApp return a new proverApp manager.
|
||||
func NewProverApp(testApps *testcontainers.TestcontainerApps, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
|
||||
var proofType message.ProofType
|
||||
switch mockName {
|
||||
case utils.ChunkProverApp:
|
||||
proofType = message.ProofTypeChunk
|
||||
case utils.BatchProverApp:
|
||||
proofType = message.ProofTypeBatch
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
name := string(mockName)
|
||||
proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", testApps.Timestamp, name)
|
||||
proverApp := &ProverApp{
|
||||
testApps: testApps,
|
||||
originFile: file,
|
||||
proverFile: proverFile,
|
||||
bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", testApps.Timestamp, name),
|
||||
index: getIndex(),
|
||||
name: name,
|
||||
args: []string{"--log.debug", "--config", proverFile},
|
||||
}
|
||||
proverApp.Cmd = cmd.NewCmd(proverApp.name, proverApp.args...)
|
||||
if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return proverApp
|
||||
}
|
||||
|
||||
// RunApp run prover-test child process by multi parameters.
|
||||
func (r *ProverApp) RunApp(t *testing.T) {
|
||||
r.Cmd.RunApp(func() bool { return r.Cmd.WaitResult(t, time.Second*40, "prover start successfully") })
|
||||
}
|
||||
|
||||
// Free stop and release prover-test.
|
||||
func (r *ProverApp) Free() {
|
||||
if !utils.IsNil(r.Cmd) {
|
||||
r.Cmd.WaitExit()
|
||||
}
|
||||
_ = os.Remove(r.proverFile)
|
||||
_ = os.Remove(r.Config.KeystorePath)
|
||||
_ = os.Remove(r.bboltDB)
|
||||
}
|
||||
|
||||
// MockConfig creates a new prover config.
|
||||
func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.ProofType) error {
|
||||
cfg, err := config.NewConfig(r.originFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg.ProverName = fmt.Sprintf("%s_%d", r.name, r.index)
|
||||
cfg.KeystorePath = fmt.Sprintf("/tmp/%d_%s.json", r.testApps.Timestamp, cfg.ProverName)
|
||||
|
||||
endpoint, err := r.testApps.GetL2GethEndPoint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg.L2Geth.Endpoint = endpoint
|
||||
cfg.L2Geth.Confirmations = rpc.LatestBlockNumber
|
||||
// Reuse l1geth's keystore file
|
||||
cfg.KeystorePassword = "scrolltest"
|
||||
cfg.DBPath = r.bboltDB
|
||||
// Create keystore file.
|
||||
_, err = utils.LoadOrCreateKey(cfg.KeystorePath, cfg.KeystorePassword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg.Coordinator.BaseURL = httpURL
|
||||
cfg.Coordinator.RetryCount = 10
|
||||
cfg.Coordinator.RetryWaitTimeSec = 10
|
||||
cfg.Coordinator.ConnectionTimeoutSec = 30
|
||||
cfg.Core.ProofType = proofType
|
||||
r.Config = cfg
|
||||
|
||||
if !store {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, err := json.Marshal(r.Config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(r.proverFile, data, 0600)
|
||||
}
|
||||
7 prover/cmd/main.go Normal file
@@ -0,0 +1,7 @@
package main

import "scroll-tech/prover/cmd/app"

func main() {
	app.Run()
}
@@ -1,18 +1,13 @@
{
"prover_name": "prover-1",
"hard_fork_name": "homestead",
"keystore_path": "keystore.json",
"keystore_password": "prover-pwd",
"db_path": "unique-db-path-for-prover-1",
"proof_type": 2,
"low_version_circuit": {
"hard_fork_name": "bernoulli",
"core": {
"params_path": "params",
"assets_path": "assets"
},
"high_version_circuit": {
"hard_fork_name": "curie",
"params_path": "params",
"assets_path": "assets"
"assets_path": "assets",
"proof_type": 2
},
"coordinator": {
"base_url": "http://localhost:8555",
@@ -21,6 +16,7 @@
"connection_timeout_sec": 30
},
"l2geth": {
"endpoint": "http://localhost:9999"
"endpoint": "http://localhost:9999",
"confirmations": "0x1"
}
}
66 prover/config/config.go Normal file
@@ -0,0 +1,66 @@
package config

import (
	"encoding/json"
	"os"
	"path/filepath"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/rpc"

	"scroll-tech/common/types/message"
)

// Config loads prover configuration items.
type Config struct {
	ProverName       string             `json:"prover_name"`
	HardForkName     string             `json:"hard_fork_name"`
	KeystorePath     string             `json:"keystore_path"`
	KeystorePassword string             `json:"keystore_password"`
	Core             *ProverCoreConfig  `json:"core"`
	DBPath           string             `json:"db_path"`
	Coordinator      *CoordinatorConfig `json:"coordinator"`
	L2Geth           *L2GethConfig      `json:"l2geth,omitempty"` // only for chunk_prover
}

// ProverCoreConfig loads the zk prover config.
type ProverCoreConfig struct {
	ParamsPath string            `json:"params_path"`
	AssetsPath string            `json:"assets_path"`
	ProofType  message.ProofType `json:"proof_type,omitempty"` // 1: chunk prover (default type), 2: batch prover
	DumpDir    string            `json:"dump_dir,omitempty"`
}

// CoordinatorConfig represents the configuration for the Coordinator client.
type CoordinatorConfig struct {
	BaseURL              string `json:"base_url"`
	RetryCount           int    `json:"retry_count"`
	RetryWaitTimeSec     int    `json:"retry_wait_time_sec"`
	ConnectionTimeoutSec int    `json:"connection_timeout_sec"`
}

// L2GethConfig represents the configuration for the l2geth client.
type L2GethConfig struct {
	Endpoint      string          `json:"endpoint"`
	Confirmations rpc.BlockNumber `json:"confirmations"`
}

// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
	buf, err := os.ReadFile(filepath.Clean(file))
	if err != nil {
		return nil, err
	}

	cfg := &Config{}
	if err = json.Unmarshal(buf, cfg); err != nil {
		return nil, err
	}
	if !filepath.IsAbs(cfg.DBPath) {
		if cfg.DBPath, err = filepath.Abs(cfg.DBPath); err != nil {
			log.Error("Failed to get abs path", "error", err)
			return nil, err
		}
	}
	return cfg, nil
}
45
prover/core/mock.go
Normal file
45
prover/core/mock.go
Normal file
@@ -0,0 +1,45 @@
|
||||
//go:build mock_prover
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/prover/config"
|
||||
)
|
||||
|
||||
// ProverCore sends block-traces to rust-prover through socket and get back the zk-proof.
|
||||
type ProverCore struct {
|
||||
cfg *config.ProverCoreConfig
|
||||
VK string
|
||||
}
|
||||
|
||||
// NewProverCore inits a ProverCore object.
|
||||
func NewProverCore(cfg *config.ProverCoreConfig) (*ProverCore, error) {
|
||||
return &ProverCore{cfg: cfg}, nil
|
||||
}
|
||||
|
||||
func (p *ProverCore) ProveChunk(taskID string, traces []*types.BlockTrace) (*message.ChunkProof, error) {
|
||||
_empty := common.BigToHash(big.NewInt(0))
|
||||
return &message.ChunkProof{
|
||||
StorageTrace: _empty[:],
|
||||
Protocol: _empty[:],
|
||||
Proof: _empty[:],
|
||||
Instances: _empty[:],
|
||||
Vk: _empty[:],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *ProverCore) ProveBatch(taskID string, chunkInfos []*message.ChunkInfo, chunkProofs []*message.ChunkProof) (*message.BatchProof, error) {
|
||||
_empty := common.BigToHash(big.NewInt(0))
|
||||
return &message.BatchProof{
|
||||
Proof: _empty[:],
|
||||
Instances: _empty[:],
|
||||
Vk: _empty[:],
|
||||
}, nil
|
||||
}
|
||||
258
prover/core/prover.go
Normal file
258
prover/core/prover.go
Normal file
@@ -0,0 +1,258 @@
|
||||
//go:build !mock_prover
|
||||
|
||||
package core
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
|
||||
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
|
||||
#include <stdlib.h>
|
||||
#include "./lib/libzkp.h"
|
||||
*/
|
||||
import "C" //nolint:typecheck
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"unsafe"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/prover/config"
|
||||
)
|
||||
|
||||
// ProverCore sends block-traces to rust-prover through ffi and get back the zk-proof.
|
||||
type ProverCore struct {
|
||||
cfg *config.ProverCoreConfig
|
||||
VK string
|
||||
}
|
||||
|
||||
// NewProverCore inits a ProverCore object.
|
||||
func NewProverCore(cfg *config.ProverCoreConfig) (*ProverCore, error) {
|
||||
paramsPathStr := C.CString(cfg.ParamsPath)
|
||||
assetsPathStr := C.CString(cfg.AssetsPath)
|
||||
defer func() {
|
||||
C.free(unsafe.Pointer(paramsPathStr))
|
||||
C.free(unsafe.Pointer(assetsPathStr))
|
||||
}()
|
||||
|
||||
var vk string
|
||||
var rawVK *C.char
|
||||
if cfg.ProofType == message.ProofTypeBatch {
|
||||
C.init_batch_prover(paramsPathStr, assetsPathStr)
|
||||
rawVK = C.get_batch_vk()
|
||||
} else if cfg.ProofType == message.ProofTypeChunk {
|
||||
C.init_chunk_prover(paramsPathStr, assetsPathStr)
|
||||
rawVK = C.get_chunk_vk()
|
||||
}
|
||||
defer C.free_c_chars(rawVK)
|
||||
|
||||
if rawVK != nil {
|
||||
vk = C.GoString(rawVK)
|
||||
}
|
||||
|
||||
if cfg.DumpDir != "" {
|
||||
err := os.MkdirAll(cfg.DumpDir, os.ModePerm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Info("Enabled dump_proof", "dir", cfg.DumpDir)
|
||||
}
|
||||
|
||||
return &ProverCore{cfg: cfg, VK: vk}, nil
|
||||
}
|
||||
|
||||
// ProveBatch call rust ffi to generate batch proof.
|
||||
func (p *ProverCore) ProveBatch(taskID string, chunkInfos []*message.ChunkInfo, chunkProofs []*message.ChunkProof) (*message.BatchProof, error) {
|
||||
if p.cfg.ProofType != message.ProofTypeBatch {
|
||||
return nil, fmt.Errorf("prover is not a batch-prover (type: %v), but is trying to prove a batch", p.cfg.ProofType)
|
||||
}
|
||||
|
||||
chunkInfosByt, err := json.Marshal(chunkInfos)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chunkProofsByt, err := json.Marshal(chunkProofs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
isValid, err := p.checkChunkProofs(chunkProofsByt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !isValid {
|
||||
return nil, fmt.Errorf("non-match chunk protocol, task-id: %s", taskID)
|
||||
}
|
||||
|
||||
proofByt, err := p.proveBatch(chunkInfosByt, chunkProofsByt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate batch proof: %v", err)
|
||||
}
|
||||
|
||||
err = p.mayDumpProof(taskID, proofByt)
|
||||
if err != nil {
|
||||
log.Error("Dump batch proof failed", "task-id", taskID, "error", err)
|
||||
}
|
||||
|
||||
zkProof := &message.BatchProof{}
|
||||
return zkProof, json.Unmarshal(proofByt, zkProof)
|
||||
}
|
||||
|
||||
// ProveChunk call rust ffi to generate chunk proof.
|
||||
func (p *ProverCore) ProveChunk(taskID string, traces []*types.BlockTrace) (*message.ChunkProof, error) {
|
||||
if p.cfg.ProofType != message.ProofTypeChunk {
|
||||
return nil, fmt.Errorf("prover is not a chunk-prover (type: %v), but is trying to prove a chunk", p.cfg.ProofType)
|
||||
}
|
||||
|
||||
tracesByt, err := json.Marshal(traces)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proofByt, err := p.proveChunk(tracesByt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = p.mayDumpProof(taskID, proofByt)
|
||||
if err != nil {
|
||||
log.Error("Dump chunk proof failed", "task-id", taskID, "error", err)
|
||||
}
|
||||
|
||||
zkProof := &message.ChunkProof{}
|
||||
return zkProof, json.Unmarshal(proofByt, zkProof)
|
||||
}
|
||||
|
||||
// TracesToChunkInfo convert traces to chunk info
|
||||
func (p *ProverCore) TracesToChunkInfo(traces []*types.BlockTrace) (*message.ChunkInfo, error) {
|
||||
tracesByt, err := json.Marshal(traces)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chunkInfoByt := p.tracesToChunkInfo(tracesByt)
|
||||
|
||||
chunkInfo := &message.ChunkInfo{}
|
||||
return chunkInfo, json.Unmarshal(chunkInfoByt, chunkInfo)
|
||||
}
|
||||
|
||||
// CheckChunkProofsResponse represents the result of a chunk proof checking operation.
|
||||
// Ok indicates whether the proof checking was successful.
|
||||
// Error provides additional details in case the check failed.
|
||||
type CheckChunkProofsResponse struct {
|
||||
Ok bool `json:"ok"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// ProofResult encapsulates the result from generating a proof.
|
||||
// Message holds the generated proof in byte slice format.
|
||||
// Error provides additional details in case the proof generation failed.
|
||||
type ProofResult struct {
|
||||
Message []byte `json:"message,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
func (p *ProverCore) checkChunkProofs(chunkProofsByt []byte) (bool, error) {
|
||||
chunkProofsStr := C.CString(string(chunkProofsByt))
|
||||
defer C.free(unsafe.Pointer(chunkProofsStr))
|
||||
|
||||
log.Info("Start to check chunk proofs ...")
|
||||
cResult := C.check_chunk_proofs(chunkProofsStr)
|
||||
defer C.free_c_chars(cResult)
|
||||
log.Info("Finish checking chunk proofs!")
|
||||
|
||||
var result CheckChunkProofsResponse
|
||||
err := json.Unmarshal([]byte(C.GoString(cResult)), &result)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to parse check chunk proofs result: %v", err)
|
||||
}
|
||||
|
||||
if result.Error != "" {
|
||||
return false, fmt.Errorf("failed to check chunk proofs: %s", result.Error)
|
||||
}
|
||||
|
||||
return result.Ok, nil
|
||||
}
|
||||
|
||||
func (p *ProverCore) proveBatch(chunkInfosByt []byte, chunkProofsByt []byte) ([]byte, error) {
|
||||
chunkInfosStr := C.CString(string(chunkInfosByt))
|
||||
chunkProofsStr := C.CString(string(chunkProofsByt))
|
||||
|
||||
defer func() {
|
||||
C.free(unsafe.Pointer(chunkInfosStr))
|
||||
C.free(unsafe.Pointer(chunkProofsStr))
|
||||
}()
|
||||
|
||||
log.Info("Start to create batch proof ...")
|
||||
bResult := C.gen_batch_proof(chunkInfosStr, chunkProofsStr)
|
||||
defer C.free_c_chars(bResult)
|
||||
log.Info("Finish creating batch proof!")
|
||||
|
||||
var result ProofResult
|
||||
err := json.Unmarshal([]byte(C.GoString(bResult)), &result)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse batch proof result: %v", err)
|
||||
}
|
||||
|
||||
if result.Error != "" {
|
||||
return nil, fmt.Errorf("failed to generate batch proof: %s", result.Error)
|
||||
}
|
||||
|
||||
return result.Message, nil
|
||||
}
|
||||
|
||||
func (p *ProverCore) proveChunk(tracesByt []byte) ([]byte, error) {
|
||||
tracesStr := C.CString(string(tracesByt))
|
||||
defer C.free(unsafe.Pointer(tracesStr))
|
||||
|
||||
log.Info("Start to create chunk proof ...")
|
||||
cProof := C.gen_chunk_proof(tracesStr)
|
||||
defer C.free_c_chars(cProof)
|
||||
log.Info("Finish creating chunk proof!")
|
||||
|
||||
var result ProofResult
|
||||
err := json.Unmarshal([]byte(C.GoString(cProof)), &result)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse chunk proof result: %v", err)
|
||||
}
|
||||
|
||||
if result.Error != "" {
|
||||
return nil, fmt.Errorf("failed to generate chunk proof: %s", result.Error)
|
||||
}
|
||||
|
||||
return result.Message, nil
|
||||
}
|
||||
|
||||
func (p *ProverCore) mayDumpProof(id string, proofByt []byte) error {
|
||||
if p.cfg.DumpDir == "" {
|
||||
return nil
|
||||
}
|
||||
path := filepath.Join(p.cfg.DumpDir, id)
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err = f.Close(); err != nil {
|
||||
log.Error("failed to close proof dump file", "id", id, "error", err)
|
||||
}
|
||||
}()
|
||||
log.Info("Saving proof", "task-id", id)
|
||||
_, err = f.Write(proofByt)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *ProverCore) tracesToChunkInfo(tracesByt []byte) []byte {
|
||||
tracesStr := C.CString(string(tracesByt))
|
||||
defer C.free(unsafe.Pointer(tracesStr))
|
||||
|
||||
cChunkInfo := C.block_traces_to_chunk_info(tracesStr)
|
||||
defer C.free_c_chars(cChunkInfo)
|
||||
|
||||
chunkInfo := C.GoString(cChunkInfo)
|
||||
return []byte(chunkInfo)
|
||||
}
|
||||
154
prover/core/prover_test.go
Normal file
154
prover/core/prover_test.go
Normal file
@@ -0,0 +1,154 @@
|
||||
//go:build ffi
|
||||
|
||||
// go test -v -race -gcflags="-l" -ldflags="-s=false" -tags ffi ./...
|
||||
package core_test
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/prover/config"
|
||||
"scroll-tech/prover/core"
|
||||
)
|
||||
|
||||
var (
|
||||
paramsPath = flag.String("params", "/assets/test_params", "params dir")
|
||||
assetsPath = flag.String("assets", "/assets/test_assets", "assets dir")
|
||||
proofDumpPath = flag.String("dump", "/assets/proof_data", "the path proofs dump to")
|
||||
batchDirPath = flag.String("batch-dir", "/assets/traces/batch_24", "batch directory")
|
||||
batchVkPath = flag.String("batch-vk", "/assets/test_assets/agg_vk.vkey", "batch vk")
|
||||
chunkVkPath = flag.String("chunk-vk", "/assets/test_assets/chunk_vk.vkey", "chunk vk")
|
||||
)
|
||||
|
||||
func TestFFI(t *testing.T) {
|
||||
as := assert.New(t)
|
||||
|
||||
chunkProverConfig := &config.ProverCoreConfig{
|
||||
DumpDir: *proofDumpPath,
|
||||
ParamsPath: *paramsPath,
|
||||
AssetsPath: *assetsPath,
|
||||
ProofType: message.ProofTypeChunk,
|
||||
}
|
||||
chunkProverCore, err := core.NewProverCore(chunkProverConfig)
|
||||
as.NoError(err)
|
||||
t.Log("Constructed chunk prover")
|
||||
|
||||
as.Equal(chunkProverCore.VK, readVk(*chunkVkPath, as))
|
||||
t.Log("Chunk VK must be available when init")
|
||||
|
||||
// Get the list of subdirectories (chunks)
|
||||
chunkDirs, err := os.ReadDir(*batchDirPath)
|
||||
as.NoError(err)
|
||||
sort.Slice(chunkDirs, func(i, j int) bool {
|
||||
return chunkDirs[i].Name() < chunkDirs[j].Name()
|
||||
})
|
||||
|
||||
chunkInfos := make([]*message.ChunkInfo, 0, len(chunkDirs))
|
||||
chunkProofs := make([]*message.ChunkProof, 0, len(chunkDirs))
|
||||
|
||||
for i, dir := range chunkDirs {
|
||||
if dir.IsDir() {
|
||||
chunkPath := filepath.Join(*batchDirPath, dir.Name())
|
||||
|
||||
chunkTrace := readChunkTrace(chunkPath, as)
|
||||
t.Logf("Loaded chunk trace %d", i+1)
|
||||
|
||||
chunkInfo, err := chunkProverCore.TracesToChunkInfo(chunkTrace)
|
||||
as.NoError(err)
|
||||
chunkInfos = append(chunkInfos, chunkInfo)
|
||||
t.Logf("Converted to chunk info %d", i+1)
|
||||
|
||||
chunkProof, err := chunkProverCore.ProveChunk(fmt.Sprintf("chunk_proof%d", i+1), chunkTrace)
|
||||
as.NoError(err)
|
||||
chunkProofs = append(chunkProofs, chunkProof)
|
||||
t.Logf("Generated and dumped chunk proof %d", i+1)
|
||||
}
|
||||
}
|
||||
|
||||
as.Equal(chunkProverCore.VK, readVk(*chunkVkPath, as))
|
||||
t.Log("Chunk VKs must be equal after proving")
|
||||
|
||||
batchProverConfig := &config.ProverCoreConfig{
|
||||
DumpDir: *proofDumpPath,
|
||||
ParamsPath: *paramsPath,
|
||||
AssetsPath: *assetsPath,
|
||||
ProofType: message.ProofTypeBatch,
|
||||
}
|
||||
batchProverCore, err := core.NewProverCore(batchProverConfig)
|
||||
as.NoError(err)
|
||||
|
||||
as.Equal(batchProverCore.VK, readVk(*batchVkPath, as))
|
||||
t.Log("Batch VK must be available when init")
|
||||
|
||||
_, err = batchProverCore.ProveBatch("batch_proof", chunkInfos, chunkProofs)
|
||||
as.NoError(err)
|
||||
t.Log("Generated and dumped batch proof")
|
||||
|
||||
as.Equal(batchProverCore.VK, readVk(*batchVkPath, as))
|
||||
t.Log("Batch VKs must be equal after proving")
|
||||
}
|
||||
func readChunkTrace(filePat string, as *assert.Assertions) []*types.BlockTrace {
|
||||
fileInfo, err := os.Stat(filePat)
|
||||
as.NoError(err)
|
||||
|
||||
var traces []*types.BlockTrace
|
||||
|
||||
readFile := func(path string) {
|
||||
f, err := os.Open(path)
|
||||
as.NoError(err)
|
||||
defer func() {
|
||||
as.NoError(f.Close())
|
||||
}()
|
||||
byt, err := io.ReadAll(f)
|
||||
as.NoError(err)
|
||||
|
||||
trace := &types.BlockTrace{}
|
||||
as.NoError(json.Unmarshal(byt, trace))
|
||||
|
||||
traces = append(traces, trace)
|
||||
}
|
||||
|
||||
if fileInfo.IsDir() {
|
||||
files, err := os.ReadDir(filePat)
|
||||
as.NoError(err)
|
||||
|
||||
// Sort files alphabetically
|
||||
sort.Slice(files, func(i, j int) bool {
|
||||
return files[i].Name() < files[j].Name()
|
||||
})
|
||||
|
||||
for _, file := range files {
|
||||
if !file.IsDir() {
|
||||
readFile(filepath.Join(filePat, file.Name()))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
readFile(filePat)
|
||||
}
|
||||
|
||||
return traces
|
||||
}
|
||||
|
||||
func readVk(filePat string, as *assert.Assertions) string {
|
||||
f, err := os.Open(filePat)
|
||||
as.NoError(err)
|
||||
defer func() {
|
||||
as.NoError(f.Close())
|
||||
}()
|
||||
byt, err := io.ReadAll(f)
|
||||
as.NoError(err)
|
||||
|
||||
return base64.StdEncoding.EncodeToString(byt)
|
||||
}
|
||||
82
prover/go.mod
Normal file
82
prover/go.mod
Normal file
@@ -0,0 +1,82 @@
|
||||
module scroll-tech/prover
|
||||
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/go-resty/resty/v2 v2.7.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
go.etcd.io/bbolt v1.3.7
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.12.0 // indirect
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/consensys/bavard v0.1.13 // indirect
|
||||
github.com/consensys/gnark-crypto v0.12.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
|
||||
github.com/fjl/memsize v0.0.2 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/go-kit/kit v0.9.0 // indirect
|
||||
github.com/go-logfmt/logfmt v0.5.1 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/hashicorp/go-bexpr v0.1.10 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
|
||||
github.com/holiman/uint256 v1.2.4 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/pointerstructure v1.2.0 // indirect
|
||||
github.com/mmcloughlin/addchain v0.4.0 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/onsi/ginkgo v1.16.5 // indirect
|
||||
github.com/onsi/gomega v1.27.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/tsdb v0.7.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rogpeppe/go-internal v1.10.0 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.8.2 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/status-im/keycard-go v0.2.0 // indirect
|
||||
github.com/supranational/blst v0.3.11 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.3 // indirect
|
||||
golang.org/x/crypto v0.19.0 // indirect
|
||||
golang.org/x/net v0.20.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
golang.org/x/sys v0.17.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
rsc.io/tmplfunc v0.0.3 // indirect
|
||||
)
|
||||
296
prover/go.sum
Normal file
296
prover/go.sum
Normal file
@@ -0,0 +1,296 @@
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
|
||||
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
|
||||
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
|
||||
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
|
||||
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
|
||||
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
|
||||
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
|
||||
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
|
||||
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
|
||||
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
|
||||
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
|
||||
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
|
||||
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
|
||||
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
|
||||
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
|
||||
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
|
||||
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
|
||||
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
|
||||
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
|
||||
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
|
||||
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
||||
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
|
||||
@@ -1,10 +0,0 @@
#!/bin/bash
set -ue

higher_zkevm_item=`grep "zkevm-circuits.git" ./Cargo.lock | sort | uniq | awk -F "[#=]" '{print $3" "$4}' | sort -k 1 | tail -n 1`

higher_version=`echo $higher_zkevm_item | awk '{print $1}'`

higher_commit=`echo $higher_zkevm_item | cut -d ' ' -f2 | cut -c-7`

echo "$higher_version $higher_commit"
420 prover/prover.go Normal file
@@ -0,0 +1,420 @@
package prover

import (
    "context"
    "crypto/ecdsa"
    "encoding/json"
    "errors"
    "fmt"
    "sort"
    "sync/atomic"
    "time"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"

    "scroll-tech/prover/client"
    "scroll-tech/prover/config"
    "scroll-tech/prover/core"
    "scroll-tech/prover/store"
    putils "scroll-tech/prover/utils"

    "scroll-tech/common/types/message"
    "scroll-tech/common/utils"
)

var (
    // retry connecting to coordinator
    retryWait = time.Second * 10
)

// Prover holds the coordinator client, the local task stack and the prover core.
type Prover struct {
    ctx               context.Context
    cfg               *config.Config
    coordinatorClient *client.CoordinatorClient
    stack             *store.Stack
    l2GethClient      *ethclient.Client // only applicable for a chunk_prover
    proverCore        *core.ProverCore

    isClosed int64
    stopChan chan struct{}

    priv *ecdsa.PrivateKey
}

// NewProver creates a new Prover object.
func NewProver(ctx context.Context, cfg *config.Config) (*Prover, error) {
    // load or create wallet
    priv, err := utils.LoadOrCreateKey(cfg.KeystorePath, cfg.KeystorePassword)
    if err != nil {
        return nil, err
    }

    // Get stack db handler
    stackDb, err := store.NewStack(cfg.DBPath)
    if err != nil {
        return nil, err
    }

    var l2GethClient *ethclient.Client
    if cfg.Core.ProofType == message.ProofTypeChunk {
        if cfg.L2Geth == nil || cfg.L2Geth.Endpoint == "" {
            return nil, errors.New("missing l2geth config for chunk prover")
        }
        // Connect l2geth node. Only applicable for a chunk_prover.
        l2GethClient, err = ethclient.DialContext(ctx, cfg.L2Geth.Endpoint)
        if err != nil {
            return nil, err
        }
        // Use gzip compression.
        l2GethClient.SetHeader("Accept-Encoding", "gzip")
    }

    // Create prover_core instance
    log.Info("init prover_core")
    newProverCore, err := core.NewProverCore(cfg.Core)
    if err != nil {
        return nil, err
    }
    log.Info("init prover_core successfully!")

    coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, cfg.HardForkName, priv)
    if err != nil {
        return nil, err
    }

    return &Prover{
        ctx:               ctx,
        cfg:               cfg,
        coordinatorClient: coordinatorClient,
        l2GethClient:      l2GethClient,
        stack:             stackDb,
        proverCore:        newProverCore,
        stopChan:          make(chan struct{}),
        priv:              priv,
    }, nil
}

// Type returns prover type.
func (r *Prover) Type() message.ProofType {
    return r.cfg.Core.ProofType
}

// PublicKey returns the prover's compressed public key, hex encoded.
func (r *Prover) PublicKey() string {
    return common.Bytes2Hex(crypto.CompressPubkey(&r.priv.PublicKey))
}

// Start runs Prover.
func (r *Prover) Start() {
    log.Info("start to login to coordinator")
    if err := r.coordinatorClient.Login(r.ctx); err != nil {
        log.Crit("login to coordinator failed", "error", err)
    }
    log.Info("login to coordinator successfully!")

    go r.ProveLoop()
}

// ProveLoop keeps popping tasks from the stack and sending them to the rust prover in a loop.
func (r *Prover) ProveLoop() {
    for {
        select {
        case <-r.stopChan:
            return
        default:
            if err := r.proveAndSubmit(); err != nil {
                log.Error("proveAndSubmit", "prover type", r.cfg.Core.ProofType, "error", err)
            }
        }
    }
}

func (r *Prover) proveAndSubmit() error {
    task, err := r.stack.Peek()
    if err != nil {
        if !errors.Is(err, store.ErrEmpty) {
            return fmt.Errorf("failed to peek from stack: %v", err)
        }
        // fetch new proving task.
        task, err = r.fetchTaskFromCoordinator()
        if err != nil {
            time.Sleep(retryWait)
            return fmt.Errorf("failed to fetch task from coordinator: %v", err)
        }

        // Push the new task into the stack
        if err = r.stack.Push(task); err != nil {
            return fmt.Errorf("failed to push task into stack: %v", err)
        }
    }

    var proofMsg *message.ProofDetail
    if task.Times <= 2 {
        // If tried times <= 2, try to prove the task.
        if err = r.stack.UpdateTimes(task, task.Times+1); err != nil {
            return fmt.Errorf("failed to update times on stack: %v", err)
        }

        log.Info("start to prove task", "task-type", task.Task.Type, "task-id", task.Task.ID)
        proofMsg, err = r.prove(task)
        if err != nil { // handling error from prove
            log.Error("failed to prove task", "task_type", task.Task.Type, "task-id", task.Task.ID, "err", err)
            return r.submitErr(task, message.ProofFailureNoPanic, err)
        }
        return r.submitProof(proofMsg, task.Task.UUID)
    }

    // if tried times >= 3, it's probably due to circuit proving panic
    log.Error("zk proving panic for task", "task-type", task.Task.Type, "task-id", task.Task.ID)
    return r.submitErr(task, message.ProofFailurePanic, errors.New("zk proving panic for task"))
}

// fetchTaskFromCoordinator fetches a new task from the coordinator.
func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
    // prepare the request
    req := &client.GetTaskRequest{
        TaskType: r.Type(),
        // we may not be able to get the vk at the first time, so we should pass vk to the coordinator every time we getTask
        // instead of passing vk when we login
        VK: r.proverCore.VK,
    }

    if req.TaskType == message.ProofTypeChunk {
        // get the latest confirmed block number
        latestBlockNumber, err := putils.GetLatestConfirmedBlockNumber(r.ctx, r.l2GethClient, r.cfg.L2Geth.Confirmations)
        if err != nil {
            return nil, fmt.Errorf("failed to fetch latest confirmed block number: %v", err)
        }

        if latestBlockNumber == 0 {
            return nil, fmt.Errorf("omit to prove task of the genesis block, latestBlockNumber: %v", latestBlockNumber)
        }
        req.ProverHeight = latestBlockNumber
    }

    // send the request
    resp, err := r.coordinatorClient.GetTask(r.ctx, req)
    if err != nil {
        return nil, fmt.Errorf("failed to get task, req: %v, err: %v", req, err)
    }

    // create a new TaskMsg
    taskMsg := message.TaskMsg{
        UUID: resp.Data.UUID,
        ID:   resp.Data.TaskID,
        Type: message.ProofType(resp.Data.TaskType),
    }

    // depending on the task type, unmarshal the task data into the appropriate field
    switch taskMsg.Type {
    case message.ProofTypeBatch:
        taskMsg.BatchTaskDetail = &message.BatchTaskDetail{}
        if err = json.Unmarshal([]byte(resp.Data.TaskData), taskMsg.BatchTaskDetail); err != nil {
            return nil, fmt.Errorf("failed to unmarshal batch task detail: %v", err)
        }
    case message.ProofTypeChunk:
        taskMsg.ChunkTaskDetail = &message.ChunkTaskDetail{}
        if err = json.Unmarshal([]byte(resp.Data.TaskData), taskMsg.ChunkTaskDetail); err != nil {
            return nil, fmt.Errorf("failed to unmarshal chunk task detail: %v", err)
        }
    default:
        return nil, fmt.Errorf("unknown task type: %v", taskMsg.Type)
    }

    // convert the response task to a ProvingTask
    provingTask := &store.ProvingTask{
        Task:  &taskMsg,
        Times: 0,
    }

    // marshal the task to a json string for logging
    taskJSON, err := json.Marshal(provingTask)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal task to json: %v", err)
    }

    log.Info("successfully fetched new task from coordinator", "resp", resp, "task", string(taskJSON))

    return provingTask, nil
}

// prove tries to prove a task. It returns an error if the proof fails.
func (r *Prover) prove(task *store.ProvingTask) (*message.ProofDetail, error) {
    detail := &message.ProofDetail{
        ID:     task.Task.ID,
        Type:   task.Task.Type,
        Status: message.StatusOk,
    }

    switch r.Type() {
    case message.ProofTypeChunk:
        proof, err := r.proveChunk(task)
        if err != nil {
            detail.Status = message.StatusProofError
            detail.Error = err.Error()
            return detail, err
        }
        detail.ChunkProof = proof
        log.Info("prove chunk success", "task-id", task.Task.ID)
        return detail, nil

    case message.ProofTypeBatch:
        proof, err := r.proveBatch(task)
        if err != nil {
            detail.Status = message.StatusProofError
            detail.Error = err.Error()
            return detail, err
        }
        detail.BatchProof = proof
        log.Info("prove batch success", "task-id", task.Task.ID)
        return detail, nil

    default:
        err := fmt.Errorf("invalid task type: %v", task.Task.Type)
        return detail, err
    }
}

func (r *Prover) proveChunk(task *store.ProvingTask) (*message.ChunkProof, error) {
    if task.Task.ChunkTaskDetail == nil {
        return nil, fmt.Errorf("ChunkTaskDetail is empty")
    }
    traces, err := r.getSortedTracesByHashes(task.Task.ChunkTaskDetail.BlockHashes)
    if err != nil {
        return nil, fmt.Errorf("get traces from eth node failed, block hashes: %v, err: %v", task.Task.ChunkTaskDetail.BlockHashes, err)
    }
    return r.proverCore.ProveChunk(task.Task.ID, traces)
}

func (r *Prover) proveBatch(task *store.ProvingTask) (*message.BatchProof, error) {
    if task.Task.BatchTaskDetail == nil {
        return nil, fmt.Errorf("BatchTaskDetail is empty")
    }
    return r.proverCore.ProveBatch(task.Task.ID, task.Task.BatchTaskDetail.ChunkInfos, task.Task.BatchTaskDetail.ChunkProofs)
}

func (r *Prover) submitProof(msg *message.ProofDetail, uuid string) error {
    // prepare the submit request
    req := &client.SubmitProofRequest{
        UUID:     uuid,
        TaskID:   msg.ID,
        TaskType: int(msg.Type),
        Status:   int(msg.Status),
    }

    // marshal proof by task type
    switch msg.Type {
    case message.ProofTypeChunk:
        if msg.ChunkProof != nil {
            proofData, err := json.Marshal(msg.ChunkProof)
            if err != nil {
                return fmt.Errorf("error marshaling chunk proof: %v", err)
            }
            req.Proof = string(proofData)
        }
    case message.ProofTypeBatch:
        if msg.BatchProof != nil {
            proofData, err := json.Marshal(msg.BatchProof)
            if err != nil {
                return fmt.Errorf("error marshaling batch proof: %v", err)
            }
            req.Proof = string(proofData)
        }
    }

    // send the submit request
    if err := r.coordinatorClient.SubmitProof(r.ctx, req); err != nil {
        if !errors.Is(errors.Unwrap(err), client.ErrCoordinatorConnect) {
            if deleteErr := r.stack.Delete(msg.ID); deleteErr != nil {
                log.Error("prover stack pop failed", "task_type", msg.Type, "task_id", msg.ID, "err", deleteErr)
            }
        }
        return fmt.Errorf("error submitting proof: %v", err)
    }

    if deleteErr := r.stack.Delete(msg.ID); deleteErr != nil {
        log.Error("prover stack pop failed", "task_type", msg.Type, "task_id", msg.ID, "err", deleteErr)
    }
    log.Info("proof submitted successfully", "task-id", msg.ID, "task-type", msg.Type, "task-status", msg.Status, "err", msg.Error)

    return nil
}

func (r *Prover) submitErr(task *store.ProvingTask, proofFailureType message.ProofFailureType, err error) error {
    // prepare the submit request
    req := &client.SubmitProofRequest{
        UUID:        task.Task.UUID,
        TaskID:      task.Task.ID,
        TaskType:    int(task.Task.Type),
        Status:      int(message.StatusProofError),
        Proof:       "",
        FailureType: int(proofFailureType),
        FailureMsg:  err.Error(),
    }

    // send the submit request
    if submitErr := r.coordinatorClient.SubmitProof(r.ctx, req); submitErr != nil {
        if !errors.Is(errors.Unwrap(submitErr), client.ErrCoordinatorConnect) {
            if deleteErr := r.stack.Delete(task.Task.ID); deleteErr != nil {
                log.Error("prover stack pop failed", "task_type", task.Task.Type, "task_id", task.Task.ID, "err", deleteErr)
            }
        }
        return fmt.Errorf("error submitting proof: %v", submitErr)
    }
    if deleteErr := r.stack.Delete(task.Task.ID); deleteErr != nil {
        log.Error("prover stack pop failed", "task_type", task.Task.Type, "task_id", task.Task.ID, "err", deleteErr)
    }

    log.Info("proof submitted report failure successfully",
        "task-id", task.Task.ID, "task-type", task.Task.Type,
        "task-status", message.StatusProofError, "err", err)
    return nil
}

func (r *Prover) getSortedTracesByHashes(blockHashes []common.Hash) ([]*types.BlockTrace, error) {
    if len(blockHashes) == 0 {
        return nil, fmt.Errorf("blockHashes is empty")
    }

    var traces []*types.BlockTrace
    for _, blockHash := range blockHashes {
        trace, err := r.l2GethClient.GetBlockTraceByHash(r.ctx, blockHash)
        if err != nil {
            return nil, err
        }
        traces = append(traces, trace)
    }

    // Sort BlockTraces by header number.
    sort.Slice(traces, func(i, j int) bool {
        return traces[i].Header.Number.Int64() < traces[j].Header.Number.Int64()
    })

    // Check that the block numbers are continuous
    for i := 0; i < len(traces)-1; i++ {
        if traces[i].Header.Number.Int64()+1 != traces[i+1].Header.Number.Int64() {
            return nil, fmt.Errorf("block numbers are not continuous, got %v and %v",
                traces[i].Header.Number.Int64(), traces[i+1].Header.Number.Int64())
        }
    }
    return traces, nil
}

// Stop stops the prover: it closes the stop channel and the local task db.
func (r *Prover) Stop() {
    if atomic.LoadInt64(&r.isClosed) == 1 {
        return
    }
    atomic.StoreInt64(&r.isClosed, 1)

    close(r.stopChan)
    // Close db
    if err := r.stack.Close(); err != nil {
        log.Error("failed to close bbolt db", "error", err)
    }
}
@@ -1 +0,0 @@
nightly-2023-12-03
@@ -1,9 +0,0 @@
edition = "2021"

comment_width = 100
imports_granularity = "Crate"
max_width = 100
newline_style = "Unix"
# normalize_comments = true
reorder_imports = true
wrap_comments = true
@@ -1,103 +0,0 @@
use anyhow::{bail, Result};
use serde::{Deserialize, Serialize};
use std::fs::File;

use crate::types::ProofType;

#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
    pub hard_fork_name: String,
    pub params_path: String,
    pub assets_path: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct CoordinatorConfig {
    pub base_url: String,
    pub retry_count: u32,
    pub retry_wait_time_sec: u64,
    pub connection_timeout_sec: u64,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct L2GethConfig {
    pub endpoint: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
    pub prover_name: String,
    pub keystore_path: String,
    pub keystore_password: String,
    pub db_path: String,
    #[serde(default)]
    pub proof_type: ProofType,
    pub low_version_circuit: CircuitConfig,
    pub high_version_circuit: CircuitConfig,
    pub coordinator: CoordinatorConfig,
    pub l2geth: Option<L2GethConfig>,
}

impl Config {
    pub fn from_reader<R>(reader: R) -> Result<Self>
    where
        R: std::io::Read,
    {
        serde_json::from_reader(reader).map_err(|e| anyhow::anyhow!(e))
    }

    pub fn from_file(file_name: String) -> Result<Self> {
        let file = File::open(file_name)?;
        Config::from_reader(&file)
    }
}

static SCROLL_PROVER_ASSETS_DIR_ENV_NAME: &str = "SCROLL_PROVER_ASSETS_DIR";
static mut SCROLL_PROVER_ASSETS_DIRS: Vec<String> = vec![];

#[derive(Debug)]
pub struct AssetsDirEnvConfig {}

impl AssetsDirEnvConfig {
    pub fn init() -> Result<()> {
        let value = std::env::var(SCROLL_PROVER_ASSETS_DIR_ENV_NAME)?;
        let dirs: Vec<&str> = value.split(',').collect();
        if dirs.len() != 2 {
            bail!("env variable SCROLL_PROVER_ASSETS_DIR value must be 2 parts separated by comma.")
        }
        unsafe {
            SCROLL_PROVER_ASSETS_DIRS = dirs.into_iter().map(|s| s.to_string()).collect();
            log::info!(
                "init SCROLL_PROVER_ASSETS_DIRS: {:?}",
                SCROLL_PROVER_ASSETS_DIRS
            );
        }
        Ok(())
    }

    pub fn enable_first() {
        unsafe {
            log::info!(
                "set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}",
                &SCROLL_PROVER_ASSETS_DIRS[0]
            );
            std::env::set_var(
                SCROLL_PROVER_ASSETS_DIR_ENV_NAME,
                &SCROLL_PROVER_ASSETS_DIRS[0],
            );
        }
    }

    pub fn enable_second() {
        unsafe {
            log::info!(
                "set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}",
                &SCROLL_PROVER_ASSETS_DIRS[1]
            );
            std::env::set_var(
                SCROLL_PROVER_ASSETS_DIR_ENV_NAME,
                &SCROLL_PROVER_ASSETS_DIRS[1],
            );
        }
    }
}
@@ -1,134 +0,0 @@
mod api;
mod errors;
pub mod listener;
pub mod types;

use anyhow::{bail, Context, Ok, Result};
use std::rc::Rc;

use api::Api;
use errors::*;
use listener::Listener;
use tokio::runtime::Runtime;
use types::*;

use crate::{config::Config, key_signer::KeySigner};

pub struct CoordinatorClient<'a> {
    api: Api,
    token: Option<String>,
    config: &'a Config,
    key_signer: Rc<KeySigner>,
    rt: Runtime,
    listener: Box<dyn Listener>,
}

impl<'a> CoordinatorClient<'a> {
    pub fn new(
        config: &'a Config,
        key_signer: Rc<KeySigner>,
        listener: Box<dyn Listener>,
    ) -> Result<Self> {
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()?;

        let api = Api::new(
            &config.coordinator.base_url,
            core::time::Duration::from_secs(config.coordinator.connection_timeout_sec),
            config.coordinator.retry_count,
            config.coordinator.retry_wait_time_sec,
        )?;
        let mut client = Self {
            api,
            token: None,
            config,
            key_signer,
            rt,
            listener,
        };
        client.login()?;
        Ok(client)
    }

    fn login(&mut self) -> Result<()> {
        let api = &self.api;
        let challenge_response = self.rt.block_on(api.challenge())?;
        if challenge_response.errcode != ErrorCode::Success {
            bail!("challenge failed: {}", challenge_response.errmsg)
        }
        let mut token: String;
        if let Some(r) = challenge_response.data {
            token = r.token;
        } else {
            bail!("challenge failed: got empty token")
        }

        let login_message = LoginMessage {
            challenge: token.clone(),
            prover_name: self.config.prover_name.clone(),
            prover_version: crate::version::get_version(),
        };

        let buffer = login_message.rlp();
        let signature = self.key_signer.sign_buffer(&buffer)?;
        let login_request = LoginRequest {
            message: login_message,
            signature,
        };
        let login_response = self.rt.block_on(api.login(&login_request, &token))?;
        if login_response.errcode != ErrorCode::Success {
            bail!("login failed: {}", login_response.errmsg)
        }
        if let Some(r) = login_response.data {
            token = r.token;
        } else {
            bail!("login failed: got empty token")
        }
        self.token = Some(token);
        Ok(())
    }

    fn action_with_re_login<T, F, R>(&mut self, req: &R, mut f: F) -> Result<Response<T>>
    where
        F: FnMut(&mut Self, &R) -> Result<Response<T>>,
    {
        let response = f(self, req)?;
        if response.errcode == ErrorCode::ErrJWTTokenExpired {
            log::info!("JWT expired, attempting to re-login");
            self.login().context("JWT expired, re-login failed")?;
            log::info!("re-login success");
            return self.action_with_re_login(req, f);
        } else if response.errcode != ErrorCode::Success {
            bail!("action failed: {}", response.errmsg)
        }
        Ok(response)
    }

    fn do_get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
        self.rt
            .block_on(self.api.get_task(req, self.token.as_ref().unwrap()))
    }

    pub fn get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
        self.action_with_re_login(req, |s, req| s.do_get_task(req))
    }

    fn do_submit_proof(
        &mut self,
        req: &SubmitProofRequest,
    ) -> Result<Response<SubmitProofResponseData>> {
        let response = self
            .rt
            .block_on(self.api.submit_proof(req, self.token.as_ref().unwrap()))?;
        self.listener.on_proof_submitted(req);
        Ok(response)
    }

    pub fn submit_proof(
        &mut self,
        req: &SubmitProofRequest,
    ) -> Result<Response<SubmitProofResponseData>> {
        self.action_with_re_login(req, |s, req| s.do_submit_proof(req))
    }
}
@@ -1,126 +0,0 @@
use super::types::*;
use anyhow::{bail, Result};
use core::time::Duration;
use reqwest::{header::CONTENT_TYPE, Url};
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware};
use serde::Serialize;

pub struct Api {
    url_base: Url,
    send_timeout: Duration,
    pub client: ClientWithMiddleware,
}

impl Api {
    pub fn new(
        url_base: &str,
        send_timeout: Duration,
        retry_count: u32,
        retry_wait_time_sec: u64,
    ) -> Result<Self> {
        let retry_wait_duration = core::time::Duration::from_secs(retry_wait_time_sec);
        let retry_policy = ExponentialBackoff::builder()
            .retry_bounds(retry_wait_duration / 2, retry_wait_duration)
            .build_with_max_retries(retry_count);

        let client = ClientBuilder::new(reqwest::Client::new())
            .with(RetryTransientMiddleware::new_with_policy(retry_policy))
            .build();

        Ok(Self {
            url_base: Url::parse(url_base)?,
            send_timeout,
            client,
        })
    }

    pub async fn challenge(&self) -> Result<Response<ChallengeResponseData>> {
        let method = "/coordinator/v1/challenge";
        let url = self.build_url(method)?;

        let response = self
            .client
            .get(url)
            .header(CONTENT_TYPE, "application/json")
            .timeout(self.send_timeout)
            .send()
            .await?;

        let response_body = response.text().await?;

        serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
    }

    pub async fn login(
        &self,
        req: &LoginRequest,
        token: &String,
    ) -> Result<Response<LoginResponseData>> {
        let method = "/coordinator/v1/login";
        self.post_with_token(method, req, token).await
    }

    pub async fn get_task(
        &self,
        req: &GetTaskRequest,
        token: &String,
    ) -> Result<Response<GetTaskResponseData>> {
        let method = "/coordinator/v1/get_task";
        self.post_with_token(method, req, token).await
    }

    pub async fn submit_proof(
        &self,
        req: &SubmitProofRequest,
        token: &String,
    ) -> Result<Response<SubmitProofResponseData>> {
        let method = "/coordinator/v1/submit_proof";
        self.post_with_token(method, req, token).await
    }

    async fn post_with_token<Req, Resp>(
        &self,
        method: &str,
        req: &Req,
        token: &String,
    ) -> Result<Resp>
    where
        Req: ?Sized + Serialize,
        Resp: serde::de::DeserializeOwned,
    {
        let url = self.build_url(method)?;
        let request_body = serde_json::to_string(req)?;

        log::info!("[coordinator client], {method}, request: {request_body}");
        let response = self
            .client
            .post(url)
            .header(CONTENT_TYPE, "application/json")
            .bearer_auth(token)
            .body(request_body)
            .timeout(self.send_timeout)
            .send()
            .await?;

        if response.status() != http::status::StatusCode::OK {
            log::error!(
                "[coordinator client], {method}, status not ok: {}",
                response.status()
            );
            bail!(
                "[coordinator client], {method}, status not ok: {}",
                response.status()
            )
        }

        let response_body = response.text().await?;

        log::info!("[coordinator client], {method}, response: {response_body}");
        serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
    }

    fn build_url(&self, method: &str) -> Result<Url> {
        self.url_base.join(method).map_err(|e| anyhow::anyhow!(e))
    }
}
@@ -1,53 +0,0 @@
use serde::{Deserialize, Deserializer};

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ErrorCode {
    Success,
    InternalServerError,

    ErrProverStatsAPIParameterInvalidNo,
    ErrProverStatsAPIProverTaskFailure,
    ErrProverStatsAPIProverTotalRewardFailure,

    ErrCoordinatorParameterInvalidNo,
    ErrCoordinatorGetTaskFailure,
    ErrCoordinatorHandleZkProofFailure,
    ErrCoordinatorEmptyProofData,

    ErrJWTCommonErr,
    ErrJWTTokenExpired,

    Undefined(i32),
}

impl ErrorCode {
    fn from_i32(v: i32) -> Self {
        match v {
            0 => ErrorCode::Success,
            500 => ErrorCode::InternalServerError,
            10001 => ErrorCode::ErrProverStatsAPIParameterInvalidNo,
            10002 => ErrorCode::ErrProverStatsAPIProverTaskFailure,
            10003 => ErrorCode::ErrProverStatsAPIProverTotalRewardFailure,
            20001 => ErrorCode::ErrCoordinatorParameterInvalidNo,
            20002 => ErrorCode::ErrCoordinatorGetTaskFailure,
            20003 => ErrorCode::ErrCoordinatorHandleZkProofFailure,
            20004 => ErrorCode::ErrCoordinatorEmptyProofData,
            50000 => ErrorCode::ErrJWTCommonErr,
            50001 => ErrorCode::ErrJWTTokenExpired,
            _ => {
                log::error!("get unexpected error code from coordinator: {v}");
                ErrorCode::Undefined(v)
            }
        }
    }
}

impl<'de> Deserialize<'de> for ErrorCode {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: i32 = i32::deserialize(deserializer)?;
        Ok(ErrorCode::from_i32(v))
    }
}
@@ -1,5 +0,0 @@
use super::SubmitProofRequest;

pub trait Listener {
    fn on_proof_submitted(&self, req: &SubmitProofRequest);
}
@@ -1,75 +0,0 @@
|
||||
use super::errors::ErrorCode;
|
||||
use crate::types::{ProofFailureType, ProofStatus};
|
||||
use rlp::RlpStream;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct Response<T> {
|
||||
pub errcode: ErrorCode,
|
||||
pub errmsg: String,
|
||||
pub data: Option<T>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct LoginMessage {
|
||||
pub challenge: String,
|
||||
pub prover_name: String,
|
||||
pub prover_version: String,
|
||||
}
|
||||
|
||||
impl LoginMessage {
|
||||
pub fn rlp(&self) -> Vec<u8> {
|
||||
let mut rlp = RlpStream::new();
|
||||
let num_fields = 3;
|
||||
rlp.begin_list(num_fields);
|
||||
rlp.append(&self.prover_name);
|
||||
rlp.append(&self.prover_version);
|
||||
rlp.append(&self.challenge);
|
||||
rlp.out().freeze().into()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct LoginRequest {
|
||||
pub message: LoginMessage,
|
||||
pub signature: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct LoginResponseData {
|
||||
pub time: String,
|
||||
pub token: String,
|
||||
}
|
||||
|
||||
pub type ChallengeResponseData = LoginResponseData;
|
||||
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
pub struct GetTaskRequest {
|
||||
pub task_type: crate::types::ProofType,
|
||||
pub prover_height: Option<u64>,
|
||||
pub vks: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct GetTaskResponseData {
|
||||
pub uuid: String,
|
||||
pub task_id: String,
|
||||
pub task_type: crate::types::ProofType,
|
||||
pub task_data: String,
|
||||
pub hard_fork_name: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
pub struct SubmitProofRequest {
|
||||
pub uuid: String,
|
||||
pub task_id: String,
|
||||
pub task_type: crate::types::ProofType,
|
||||
pub status: ProofStatus,
|
||||
pub proof: String,
|
||||
pub failure_type: Option<ProofFailureType>,
|
||||
pub failure_msg: Option<String>,
|
||||
pub hard_fork_name: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct SubmitProofResponseData {}
|
||||
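A hedged sketch of how these login types presumably fit together with the KeySigner shown later in this diff: RLP-encode the challenge message, sign the bytes, and wrap both into a LoginRequest. The prover name and version strings here are placeholders, not real configuration values.

fn build_login_request(signer: &KeySigner, challenge: String) -> anyhow::Result<LoginRequest> {
    let message = LoginMessage {
        challenge,
        prover_name: "example-prover".to_string(),    // placeholder
        prover_version: "v0.0.0-unknown".to_string(), // placeholder
    };
    // Sign the RLP encoding of (prover_name, prover_version, challenge).
    let signature = signer.sign_buffer(&message.rlp())?;
    Ok(LoginRequest { message, signature })
}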
@@ -1,57 +0,0 @@
|
||||
use crate::types::CommonHash;
|
||||
use anyhow::Result;
|
||||
use ethers_core::types::BlockNumber;
|
||||
use tokio::runtime::Runtime;
|
||||
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
use std::fmt::Debug;
|
||||
|
||||
use ethers_providers::{Http, Provider};
|
||||
|
||||
pub struct GethClient {
|
||||
id: String,
|
||||
provider: Provider<Http>,
|
||||
rt: Runtime,
|
||||
}
|
||||
|
||||
impl GethClient {
|
||||
pub fn new(id: &str, api_url: &str) -> Result<Self> {
|
||||
let provider = Provider::<Http>::try_from(api_url)?;
|
||||
let rt = tokio::runtime::Builder::new_current_thread()
|
||||
.enable_all()
|
||||
.build()?;
|
||||
|
||||
Ok(Self {
|
||||
id: id.to_string(),
|
||||
provider,
|
||||
rt,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_block_trace_by_hash<T>(&mut self, hash: &CommonHash) -> Result<T>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + Debug + Send,
|
||||
{
|
||||
log::info!(
|
||||
"{}: calling get_block_trace_by_hash, hash: {:#?}",
|
||||
self.id,
|
||||
hash
|
||||
);
|
||||
|
||||
let trace_future = self
|
||||
.provider
|
||||
.request("scroll_getBlockTraceByNumberOrHash", [format!("{hash:#x}")]);
|
||||
|
||||
let trace = self.rt.block_on(trace_future)?;
|
||||
Ok(trace)
|
||||
}
|
||||
|
||||
pub fn block_number(&mut self) -> Result<BlockNumber> {
|
||||
log::info!("{}: calling block_number", self.id);
|
||||
|
||||
let trace_future = self.provider.request("eth_blockNumber", ());
|
||||
|
||||
let trace = self.rt.block_on(trace_future)?;
|
||||
Ok(trace)
|
||||
}
|
||||
}
|
||||
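A brief usage sketch for the GethClient wrapper above. The endpoint and the zero hash are placeholders, and the trace is read into serde_json::Value only to show that any type satisfying the Serialize + DeserializeOwned + Debug + Send bound works.

fn query_l2geth() -> anyhow::Result<()> {
    let mut client = GethClient::new("example-prover", "http://localhost:8545")?; // placeholder endpoint
    let number = client.block_number()?;
    log::info!("latest block number: {:?}", number);

    // Placeholder hash purely for illustration; a real caller passes a block hash from a task.
    let hash = CommonHash::zero();
    let trace: serde_json::Value = client.get_block_trace_by_hash(&hash)?;
    log::info!("trace: {}", trace);
    Ok(())
}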
@@ -1,103 +0,0 @@
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Result;
|
||||
use ethers_core::{
|
||||
k256::{
|
||||
ecdsa::{signature::hazmat::PrehashSigner, RecoveryId, Signature, SigningKey},
|
||||
elliptic_curve::{sec1::ToEncodedPoint, FieldBytes},
|
||||
PublicKey, Secp256k1, SecretKey,
|
||||
},
|
||||
types::Signature as EthSignature,
|
||||
};
|
||||
|
||||
use ethers_core::types::{H256, U256};
|
||||
use hex::ToHex;
|
||||
use tiny_keccak::{Hasher, Keccak};
|
||||
|
||||
pub struct KeySigner {
|
||||
public_key: PublicKey,
|
||||
signer: SigningKey,
|
||||
}
|
||||
|
||||
impl KeySigner {
|
||||
pub fn new(key_path: &str, passwd: &str) -> Result<Self> {
|
||||
let p = Path::new(key_path);
|
||||
|
||||
let secret = if !p.exists() {
|
||||
log::info!("[key_signer] key_path not exists, create one");
|
||||
let dir = p.parent().unwrap();
|
||||
let name = p.file_name().and_then(|s| s.to_str());
|
||||
let mut rng = rand::thread_rng();
|
||||
let (secret, _) = eth_keystore::new(dir, &mut rng, passwd, name)?;
|
||||
secret
|
||||
} else {
|
||||
log::info!("[key_signer] key_path already exists, load it");
|
||||
eth_keystore::decrypt_key(key_path, passwd).map_err(|e| anyhow::anyhow!(e))?
|
||||
};
|
||||
|
||||
let secret_key = SecretKey::from_bytes(secret.as_slice().into())?;
|
||||
|
||||
let signer = SigningKey::from(secret_key.clone());
|
||||
|
||||
Ok(Self {
|
||||
public_key: secret_key.public_key(),
|
||||
signer,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_public_key(&self) -> String {
|
||||
let v: Vec<u8> = Vec::from(self.public_key.to_encoded_point(true).as_bytes());
|
||||
buffer_to_hex(&v, false)
|
||||
}
|
||||
|
||||
/// Signs the provided hash.
|
||||
pub fn sign_hash(&self, hash: H256) -> Result<EthSignature> {
|
||||
let signer = &self.signer as &dyn PrehashSigner<(Signature, RecoveryId)>;
|
||||
let (recoverable_sig, recovery_id) = signer.sign_prehash(hash.as_ref())?;
|
||||
|
||||
let v = u8::from(recovery_id) as u64;
|
||||
|
||||
let r_bytes: FieldBytes<Secp256k1> = recoverable_sig.r().into();
|
||||
let s_bytes: FieldBytes<Secp256k1> = recoverable_sig.s().into();
|
||||
let r = U256::from_big_endian(r_bytes.as_slice());
|
||||
let s = U256::from_big_endian(s_bytes.as_slice());
|
||||
|
||||
Ok(EthSignature { r, s, v })
|
||||
}
|
||||
|
||||
pub fn sign_buffer<T>(&self, buffer: &T) -> Result<String>
|
||||
where
|
||||
T: AsRef<[u8]>,
|
||||
{
|
||||
let pre_hash = keccak256(buffer);
|
||||
|
||||
let hash = H256::from(pre_hash);
|
||||
let sig = self.sign_hash(hash)?;
|
||||
|
||||
Ok(buffer_to_hex(&sig.to_vec(), true))
|
||||
}
|
||||
}
|
||||
|
||||
fn buffer_to_hex<T>(buffer: &T, has_prefix: bool) -> String
|
||||
where
|
||||
T: AsRef<[u8]>,
|
||||
{
|
||||
if has_prefix {
|
||||
format!("0x{}", buffer.encode_hex::<String>())
|
||||
} else {
|
||||
buffer.encode_hex::<String>()
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the Keccak-256 hash of input bytes.
|
||||
///
|
||||
/// Note that strings are interpreted as UTF-8 bytes,
|
||||
pub fn keccak256<T: AsRef<[u8]>>(bytes: T) -> [u8; 32] {
|
||||
let mut output = [0u8; 32];
|
||||
|
||||
let mut hasher = Keccak::v256();
|
||||
hasher.update(bytes.as_ref());
|
||||
hasher.finalize(&mut output);
|
||||
|
||||
output
|
||||
}
|
||||
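A small sanity check for the keccak256 helper above: the Keccak-256 digest of empty input is the well-known constant beginning c5d2..., which makes a convenient self-test that the hasher is wired up correctly.

fn keccak256_empty_input_matches_known_vector() {
    let digest = keccak256(b"");
    assert_eq!(
        hex::encode(digest),
        "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
    );
}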
@@ -1,76 +0,0 @@
|
||||
mod config;
|
||||
mod coordinator_client;
|
||||
mod geth_client;
|
||||
mod key_signer;
|
||||
mod prover;
|
||||
mod task_cache;
|
||||
mod task_processor;
|
||||
mod types;
|
||||
mod utils;
|
||||
mod version;
|
||||
mod zk_circuits_handler;
|
||||
|
||||
use anyhow::Result;
|
||||
use clap::{ArgAction, Parser};
|
||||
use config::{AssetsDirEnvConfig, Config};
|
||||
use prover::Prover;
|
||||
use std::rc::Rc;
|
||||
use task_cache::{ClearCacheCoordinatorListener, TaskCache};
|
||||
use task_processor::TaskProcessor;
|
||||
|
||||
/// Simple program to greet a person
|
||||
#[derive(Parser, Debug)]
|
||||
#[clap(disable_version_flag = true)]
|
||||
struct Args {
|
||||
/// Path of config file
|
||||
#[arg(long = "config", default_value = "conf/config.json")]
|
||||
config_file: String,
|
||||
|
||||
/// Version of this prover
|
||||
#[arg(short, long, action = ArgAction::SetTrue)]
|
||||
version: bool,
|
||||
|
||||
/// Path of log file
|
||||
#[arg(long = "log.file")]
|
||||
log_file: Option<String>,
|
||||
}
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let args = Args::parse();
|
||||
|
||||
if args.version {
|
||||
println!("version is {}", version::get_version());
|
||||
std::process::exit(0);
|
||||
}
|
||||
|
||||
utils::log_init(args.log_file);
|
||||
|
||||
let config: Config = Config::from_file(args.config_file)?;
|
||||
|
||||
if let Err(e) = AssetsDirEnvConfig::init() {
|
||||
log::error!("AssetsDirEnvConfig init failed: {:#}", e);
|
||||
std::process::exit(-2);
|
||||
}
|
||||
|
||||
let task_cache = Rc::new(TaskCache::new(&config.db_path)?);
|
||||
|
||||
let coordinator_listener = Box::new(ClearCacheCoordinatorListener {
|
||||
task_cache: task_cache.clone(),
|
||||
});
|
||||
|
||||
let prover = Prover::new(&config, coordinator_listener)?;
|
||||
|
||||
log::info!(
|
||||
"prover start successfully. name: {}, type: {:?}, publickey: {}, version: {}",
|
||||
config.prover_name,
|
||||
config.proof_type,
|
||||
prover.get_public_key(),
|
||||
version::get_version(),
|
||||
);
|
||||
|
||||
let task_processor = TaskProcessor::new(&prover, task_cache);
|
||||
|
||||
task_processor.start();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
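For reference, a hypothetical invocation of the clap-derived Args above, parsed from an explicit argument list rather than the real process arguments; the paths are made up for illustration.

fn parse_example_args() {
    let args = Args::parse_from(["prover", "--config", "conf/config.json", "--log.file", "/tmp/prover.log"]);
    assert_eq!(args.config_file, "conf/config.json");
    assert_eq!(args.log_file.as_deref(), Some("/tmp/prover.log"));
    assert!(!args.version); // --version was not passed
}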
@@ -1,174 +0,0 @@
|
||||
use anyhow::{bail, Context, Error, Ok, Result};
|
||||
use ethers_core::types::U64;
|
||||
|
||||
use std::{cell::RefCell, rc::Rc};
|
||||
|
||||
use crate::{
|
||||
config::Config,
|
||||
coordinator_client::{listener::Listener, types::*, CoordinatorClient},
|
||||
geth_client::GethClient,
|
||||
key_signer::KeySigner,
|
||||
types::{ProofFailureType, ProofStatus, ProofType},
|
||||
zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider},
|
||||
};
|
||||
|
||||
use super::types::{ProofDetail, Task};
|
||||
|
||||
pub struct Prover<'a> {
|
||||
config: &'a Config,
|
||||
key_signer: Rc<KeySigner>,
|
||||
circuits_handler_provider: RefCell<CircuitsHandlerProvider<'a>>,
|
||||
coordinator_client: RefCell<CoordinatorClient<'a>>,
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
}
|
||||
|
||||
impl<'a> Prover<'a> {
|
||||
pub fn new(config: &'a Config, coordinator_listener: Box<dyn Listener>) -> Result<Self> {
|
||||
let proof_type = config.proof_type;
|
||||
let keystore_path = &config.keystore_path;
|
||||
let keystore_password = &config.keystore_password;
|
||||
|
||||
let key_signer = Rc::new(KeySigner::new(keystore_path, keystore_password)?);
|
||||
let coordinator_client =
|
||||
CoordinatorClient::new(config, Rc::clone(&key_signer), coordinator_listener)
|
||||
.context("failed to create coordinator_client")?;
|
||||
|
||||
let geth_client = if config.proof_type == ProofType::Chunk {
|
||||
Some(Rc::new(RefCell::new(
|
||||
GethClient::new(
|
||||
&config.prover_name,
|
||||
&config.l2geth.as_ref().unwrap().endpoint,
|
||||
)
|
||||
.context("failed to create l2 geth_client")?,
|
||||
)))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let provider = CircuitsHandlerProvider::new(proof_type, config, geth_client.clone())
|
||||
.context("failed to create circuits handler provider")?;
|
||||
|
||||
let prover = Prover {
|
||||
config,
|
||||
key_signer: Rc::clone(&key_signer),
|
||||
circuits_handler_provider: RefCell::new(provider),
|
||||
coordinator_client: RefCell::new(coordinator_client),
|
||||
geth_client,
|
||||
};
|
||||
|
||||
Ok(prover)
|
||||
}
|
||||
|
||||
pub fn get_proof_type(&self) -> ProofType {
|
||||
self.config.proof_type
|
||||
}
|
||||
|
||||
pub fn get_public_key(&self) -> String {
|
||||
self.key_signer.get_public_key()
|
||||
}
|
||||
|
||||
pub fn fetch_task(&self) -> Result<Task> {
|
||||
log::info!("[prover] start to fetch_task");
|
||||
let mut req = GetTaskRequest {
|
||||
task_type: self.get_proof_type(),
|
||||
prover_height: None,
|
||||
vks: self.circuits_handler_provider.borrow().get_vks(),
|
||||
};
|
||||
|
||||
if self.get_proof_type() == ProofType::Chunk {
|
||||
let latest_block_number = self.get_latest_block_number_value()?;
|
||||
if let Some(v) = latest_block_number {
|
||||
if v.as_u64() == 0 {
|
||||
bail!("omit to prove task of the genesis block")
|
||||
}
|
||||
req.prover_height = Some(v.as_u64());
|
||||
} else {
|
||||
log::error!("[prover] failed to fetch latest confirmed block number, got None");
|
||||
bail!("failed to fetch latest confirmed block number, got None")
|
||||
}
|
||||
}
|
||||
let resp = self.coordinator_client.borrow_mut().get_task(&req)?;
|
||||
|
||||
match resp.data {
|
||||
Some(d) => Ok(Task::from(d)),
|
||||
None => {
|
||||
bail!("data of get_task empty, while error_code is success. there may be something wrong in response data or inner logic.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn prove_task(&self, task: &Task) -> Result<ProofDetail> {
|
||||
log::info!("[prover] start to prove_task, task id: {}", task.id);
|
||||
let handler: Rc<Box<dyn CircuitsHandler>> = self
|
||||
.circuits_handler_provider
|
||||
.borrow_mut()
|
||||
.get_circuits_handler(&task.hard_fork_name)
|
||||
.context("failed to get circuit handler")?;
|
||||
self.do_prove(task, handler)
|
||||
}
|
||||
|
||||
fn do_prove(&self, task: &Task, handler: Rc<Box<dyn CircuitsHandler>>) -> Result<ProofDetail> {
|
||||
let mut proof_detail = ProofDetail {
|
||||
id: task.id.clone(),
|
||||
proof_type: task.task_type,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
proof_detail.proof_data = handler.get_proof_data(task.task_type, task)?;
|
||||
Ok(proof_detail)
|
||||
}
|
||||
|
||||
pub fn submit_proof(&self, proof_detail: ProofDetail, task: &Task) -> Result<()> {
|
||||
log::info!(
|
||||
"[prover] start to submit_proof, task id: {}",
|
||||
proof_detail.id
|
||||
);
|
||||
|
||||
let request = SubmitProofRequest {
|
||||
uuid: task.uuid.clone(),
|
||||
task_id: proof_detail.id,
|
||||
task_type: proof_detail.proof_type,
|
||||
status: ProofStatus::Ok,
|
||||
proof: proof_detail.proof_data,
|
||||
hard_fork_name: task.hard_fork_name.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
self.do_submit(&request)
|
||||
}
|
||||
|
||||
pub fn submit_error(
|
||||
&self,
|
||||
task: &Task,
|
||||
failure_type: ProofFailureType,
|
||||
error: Error,
|
||||
) -> Result<()> {
|
||||
log::info!("[prover] start to submit_error, task id: {}", task.id);
|
||||
let request = SubmitProofRequest {
|
||||
uuid: task.uuid.clone(),
|
||||
task_id: task.id.clone(),
|
||||
task_type: task.task_type,
|
||||
status: ProofStatus::Error,
|
||||
failure_type: Some(failure_type),
|
||||
failure_msg: Some(error.to_string()),
|
||||
hard_fork_name: task.hard_fork_name.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
self.do_submit(&request)
|
||||
}
|
||||
|
||||
fn do_submit(&self, request: &SubmitProofRequest) -> Result<()> {
|
||||
self.coordinator_client.borrow_mut().submit_proof(request)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_latest_block_number_value(&self) -> Result<Option<U64>> {
|
||||
let number = self
|
||||
.geth_client
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.borrow_mut()
|
||||
.block_number()?;
|
||||
Ok(number.as_number())
|
||||
}
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
use anyhow::{Ok, Result};
|
||||
|
||||
use super::coordinator_client::{listener::Listener, types::SubmitProofRequest};
|
||||
use crate::types::TaskWrapper;
|
||||
use sled::{Config, Db};
|
||||
use std::rc::Rc;
|
||||
|
||||
pub struct TaskCache {
|
||||
db: Db,
|
||||
}
|
||||
|
||||
impl TaskCache {
|
||||
pub fn new(db_path: &String) -> Result<Self> {
|
||||
let config = Config::new().path(db_path);
|
||||
let db = config.open()?;
|
||||
log::info!("[task_cache] initiate successfully to {db_path}");
|
||||
Ok(Self { db })
|
||||
}
|
||||
|
||||
pub fn put_task(&self, task_wrapper: &TaskWrapper) -> Result<()> {
|
||||
let k = task_wrapper.task.id.clone().into_bytes();
|
||||
let v = serde_json::to_vec(task_wrapper)?;
|
||||
self.db.insert(k, v)?;
|
||||
log::info!(
|
||||
"[task_cache] put_task with task_id: {}",
|
||||
task_wrapper.task.id
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_last_task(&self) -> Result<Option<TaskWrapper>> {
|
||||
let last = self.db.last()?;
|
||||
if let Some((k, v)) = last {
|
||||
let kk = std::str::from_utf8(k.as_ref())?;
|
||||
let task_wrapper: TaskWrapper = serde_json::from_slice(v.as_ref())?;
|
||||
log::info!(
|
||||
"[task_cache] get_last_task with task_id: {kk}, count: {}",
|
||||
task_wrapper.get_count()
|
||||
);
|
||||
return Ok(Some(task_wrapper));
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
pub fn delete_task(&self, task_id: String) -> Result<()> {
|
||||
let k = task_id.clone().into_bytes();
|
||||
self.db.remove(k)?;
|
||||
log::info!("[task cache] delete_task with task_id: {task_id}");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// ========================= listener ===========================
|
||||
|
||||
pub struct ClearCacheCoordinatorListener {
|
||||
pub task_cache: Rc<TaskCache>,
|
||||
}
|
||||
|
||||
impl Listener for ClearCacheCoordinatorListener {
|
||||
fn on_proof_submitted(&self, req: &SubmitProofRequest) {
|
||||
let result = self.task_cache.delete_task(req.task_id.clone());
|
||||
if let Err(e) = result {
|
||||
log::error!("delete task from embed db failed, {:#}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
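A hedged sketch of the put/peek/delete cycle offered by the sled-backed TaskCache above, using the Task and TaskWrapper types defined later in this diff; the database path and task id are placeholders.

fn cache_roundtrip() -> anyhow::Result<()> {
    let cache = TaskCache::new(&"/tmp/prover-task-cache".to_string())?; // placeholder path

    // TaskWrapper::from starts the retry count at 0; bump it before persisting.
    let mut wrapper = TaskWrapper::from(Task {
        id: "task-1".to_string(),
        ..Default::default()
    });
    wrapper.increment_count();
    cache.put_task(&wrapper)?;

    // The last key in the tree comes back first; delete it once the task is handled.
    if let Some(last) = cache.get_last_task()? {
        assert_eq!(last.get_count(), 1);
        cache.delete_task(last.task.id.clone())?;
    }
    Ok(())
}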
@@ -1,78 +0,0 @@
|
||||
use super::{prover::Prover, task_cache::TaskCache};
|
||||
use anyhow::{Context, Result};
|
||||
use std::rc::Rc;
|
||||
|
||||
pub struct TaskProcessor<'a> {
|
||||
prover: &'a Prover<'a>,
|
||||
task_cache: Rc<TaskCache>,
|
||||
}
|
||||
|
||||
impl<'a> TaskProcessor<'a> {
|
||||
pub fn new(prover: &'a Prover<'a>, task_cache: Rc<TaskCache>) -> Self {
|
||||
TaskProcessor { prover, task_cache }
|
||||
}
|
||||
|
||||
pub fn start(&self) {
|
||||
loop {
|
||||
log::info!("start a new round.");
|
||||
if let Err(err) = self.prove_and_submit() {
|
||||
log::error!("encounter error: {:#}", err);
|
||||
} else {
|
||||
log::info!("prove & submit succeed.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn prove_and_submit(&self) -> Result<()> {
|
||||
let task_from_cache = self
|
||||
.task_cache
|
||||
.get_last_task()
|
||||
.context("failed to peek from stack")?;
|
||||
|
||||
let mut task_wrapper = match task_from_cache {
|
||||
Some(t) => t,
|
||||
None => {
|
||||
let fetch_result = self.prover.fetch_task();
|
||||
if let Err(err) = fetch_result {
|
||||
std::thread::sleep(core::time::Duration::from_secs(10));
|
||||
return Err(err).context("failed to fetch task from coordinator");
|
||||
}
|
||||
fetch_result.unwrap().into()
|
||||
}
|
||||
};
|
||||
|
||||
if task_wrapper.get_count() <= 2 {
|
||||
task_wrapper.increment_count();
|
||||
self.task_cache
|
||||
.put_task(&task_wrapper)
|
||||
.context("failed to push task into stack, updating count")?;
|
||||
|
||||
log::info!(
|
||||
"start to prove task, task_type: {:?}, task_id: {}",
|
||||
task_wrapper.task.task_type,
|
||||
task_wrapper.task.id
|
||||
);
|
||||
let result = match self.prover.prove_task(&task_wrapper.task) {
|
||||
Ok(proof_detail) => self.prover.submit_proof(proof_detail, &task_wrapper.task),
|
||||
Err(error) => self.prover.submit_error(
|
||||
&task_wrapper.task,
|
||||
super::types::ProofFailureType::NoPanic,
|
||||
error,
|
||||
),
|
||||
};
|
||||
return result;
|
||||
}
|
||||
|
||||
// if tried times >= 3, it's probably due to circuit proving panic
|
||||
log::error!(
|
||||
"zk proving panic for task, task_type: {:?}, task_id: {}",
|
||||
task_wrapper.task.task_type,
|
||||
task_wrapper.task.id
|
||||
);
|
||||
self.prover.submit_error(
|
||||
&task_wrapper.task,
|
||||
super::types::ProofFailureType::Panic,
|
||||
anyhow::anyhow!("zk proving panic for task"),
|
||||
)
|
||||
}
|
||||
}
|
||||
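The retry policy in prove_and_submit above reads as: a task is attempted at most three times, and a cached task whose count has already passed that threshold is assumed to have crashed the process inside circuit proving. A distilled sketch of that decision, using the ProofFailureType enum defined later in this diff:

fn classify_failure(attempts_so_far: usize) -> ProofFailureType {
    // Mirrors the `get_count() <= 2` check: early failures are reported as
    // recoverable, anything beyond that as a proving panic.
    if attempts_so_far <= 2 {
        ProofFailureType::NoPanic
    } else {
        ProofFailureType::Panic
    }
}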
@@ -1,195 +0,0 @@
|
||||
use ethers_core::types::H256;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
|
||||
use crate::coordinator_client::types::GetTaskResponseData;
|
||||
|
||||
pub type CommonHash = H256;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub enum ProofType {
|
||||
Undefined,
|
||||
Chunk,
|
||||
Batch,
|
||||
}
|
||||
|
||||
impl ProofType {
|
||||
fn from_u8(v: u8) -> Self {
|
||||
match v {
|
||||
1 => ProofType::Chunk,
|
||||
2 => ProofType::Batch,
|
||||
_ => ProofType::Undefined,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for ProofType {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
match *self {
|
||||
ProofType::Undefined => serializer.serialize_i8(0),
|
||||
ProofType::Chunk => serializer.serialize_i8(1),
|
||||
ProofType::Batch => serializer.serialize_i8(2),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for ProofType {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let v: u8 = u8::deserialize(deserializer)?;
|
||||
Ok(ProofType::from_u8(v))
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ProofType {
|
||||
fn default() -> Self {
|
||||
Self::Undefined
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
pub struct Task {
|
||||
pub uuid: String,
|
||||
pub id: String,
|
||||
#[serde(rename = "type", default)]
|
||||
pub task_type: ProofType,
|
||||
pub task_data: String,
|
||||
#[serde(default)]
|
||||
pub hard_fork_name: String,
|
||||
}
|
||||
|
||||
impl From<GetTaskResponseData> for Task {
|
||||
fn from(value: GetTaskResponseData) -> Self {
|
||||
Self {
|
||||
uuid: value.uuid,
|
||||
id: value.task_id,
|
||||
task_type: value.task_type,
|
||||
task_data: value.task_data,
|
||||
hard_fork_name: value.hard_fork_name,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
pub struct TaskWrapper {
|
||||
pub task: Task,
|
||||
count: usize,
|
||||
}
|
||||
|
||||
impl TaskWrapper {
|
||||
pub fn increment_count(&mut self) {
|
||||
self.count += 1;
|
||||
}
|
||||
|
||||
pub fn get_count(&self) -> usize {
|
||||
self.count
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Task> for TaskWrapper {
|
||||
fn from(task: Task) -> Self {
|
||||
TaskWrapper { task, count: 0 }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
pub struct ProofDetail {
|
||||
pub id: String,
|
||||
#[serde(rename = "type", default)]
|
||||
pub proof_type: ProofType,
|
||||
pub proof_data: String,
|
||||
pub error: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub enum ProofFailureType {
|
||||
Undefined,
|
||||
Panic,
|
||||
NoPanic,
|
||||
}
|
||||
|
||||
impl ProofFailureType {
|
||||
fn from_u8(v: u8) -> Self {
|
||||
match v {
|
||||
1 => ProofFailureType::Panic,
|
||||
2 => ProofFailureType::NoPanic,
|
||||
_ => ProofFailureType::Undefined,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for ProofFailureType {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
match *self {
|
||||
ProofFailureType::Undefined => serializer.serialize_u8(0),
|
||||
ProofFailureType::Panic => serializer.serialize_u8(1),
|
||||
ProofFailureType::NoPanic => serializer.serialize_u8(2),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for ProofFailureType {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let v: u8 = u8::deserialize(deserializer)?;
|
||||
Ok(ProofFailureType::from_u8(v))
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ProofFailureType {
|
||||
fn default() -> Self {
|
||||
Self::Undefined
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub enum ProofStatus {
|
||||
Ok,
|
||||
Error,
|
||||
}
|
||||
|
||||
impl ProofStatus {
|
||||
fn from_u8(v: u8) -> Self {
|
||||
match v {
|
||||
0 => ProofStatus::Ok,
|
||||
_ => ProofStatus::Error,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for ProofStatus {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
match *self {
|
||||
ProofStatus::Ok => serializer.serialize_u8(0),
|
||||
ProofStatus::Error => serializer.serialize_u8(1),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for ProofStatus {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let v: u8 = u8::deserialize(deserializer)?;
|
||||
Ok(ProofStatus::from_u8(v))
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ProofStatus {
|
||||
fn default() -> Self {
|
||||
Self::Ok
|
||||
}
|
||||
}
|
||||
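A quick sketch, assuming serde_json, of what the hand-written Serialize/Deserialize impls above put on the wire: the enums are exchanged as small integers, and unknown values fall back to the Undefined variant instead of failing.

fn proof_enums_serialize_as_integers() -> anyhow::Result<()> {
    assert_eq!(serde_json::to_string(&ProofType::Chunk)?, "1");
    assert_eq!(serde_json::to_string(&ProofType::Batch)?, "2");
    assert_eq!(serde_json::from_str::<ProofType>("7")?, ProofType::Undefined);

    assert_eq!(serde_json::to_string(&ProofStatus::Error)?, "1");
    assert_eq!(serde_json::to_string(&ProofFailureType::Panic)?, "1");
    Ok(())
}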
@@ -1,23 +0,0 @@
use env_logger::Env;
use std::{fs::OpenOptions, sync::Once};

static LOG_INIT: Once = Once::new();

/// Initialize log
pub fn log_init(log_file: Option<String>) {
    LOG_INIT.call_once(|| {
        let mut builder = env_logger::Builder::from_env(Env::default().default_filter_or("info"));
        if let Some(file_path) = log_file {
            let target = Box::new(
                OpenOptions::new()
                    .write(true)
                    .create(true)
                    .truncate(false)
                    .open(file_path)
                    .expect("Can't create log file"),
            );
            builder.target(env_logger::Target::Pipe(target));
        }
        builder.init();
    });
}
@@ -1,18 +0,0 @@
use std::cell::OnceCell;

static DEFAULT_COMMIT: &str = "unknown";
static mut VERSION: OnceCell<String> = OnceCell::new();

pub const TAG: &str = "v0.0.0";
pub const DEFAULT_ZK_VERSION: &str = "000000-000000";

fn init_version() -> String {
    let commit = option_env!("GIT_REV").unwrap_or(DEFAULT_COMMIT);
    let tag = option_env!("GO_TAG").unwrap_or(TAG);
    let zk_version = option_env!("ZK_VERSION").unwrap_or(DEFAULT_ZK_VERSION);
    format!("{tag}-{commit}-{zk_version}")
}

pub fn get_version() -> String {
    unsafe { VERSION.get_or_init(init_version).clone() }
}
@@ -1,172 +0,0 @@
|
||||
mod bernoulli;
|
||||
mod curie;
|
||||
|
||||
use super::geth_client::GethClient;
|
||||
use crate::{
|
||||
config::{AssetsDirEnvConfig, Config},
|
||||
types::{ProofType, Task},
|
||||
};
|
||||
use anyhow::{bail, Result};
|
||||
use bernoulli::BaseCircuitsHandler;
|
||||
use curie::NextCircuitsHandler;
|
||||
use std::{cell::RefCell, collections::HashMap, rc::Rc};
|
||||
|
||||
type HardForkName = String;
|
||||
|
||||
pub mod utils {
|
||||
pub fn encode_vk(vk: Vec<u8>) -> String {
|
||||
base64::encode(vk)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait CircuitsHandler {
|
||||
fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>>;
|
||||
|
||||
fn get_proof_data(&self, task_type: ProofType, task: &Task) -> Result<String>;
|
||||
}
|
||||
|
||||
type CircuitsHandlerBuilder = fn(
|
||||
proof_type: ProofType,
|
||||
config: &Config,
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
) -> Result<Box<dyn CircuitsHandler>>;
|
||||
|
||||
pub struct CircuitsHandlerProvider<'a> {
|
||||
proof_type: ProofType,
|
||||
config: &'a Config,
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
circuits_handler_builder_map: HashMap<HardForkName, CircuitsHandlerBuilder>,
|
||||
|
||||
current_hard_fork_name: Option<HardForkName>,
|
||||
current_circuit: Option<Rc<Box<dyn CircuitsHandler>>>,
|
||||
vks: Vec<String>,
|
||||
}
|
||||
|
||||
impl<'a> CircuitsHandlerProvider<'a> {
|
||||
pub fn new(
|
||||
proof_type: ProofType,
|
||||
config: &'a Config,
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
) -> Result<Self> {
|
||||
let mut m: HashMap<HardForkName, CircuitsHandlerBuilder> = HashMap::new();
|
||||
|
||||
fn handler_builder(
|
||||
proof_type: ProofType,
|
||||
config: &Config,
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
) -> Result<Box<dyn CircuitsHandler>> {
|
||||
log::info!(
|
||||
"now init zk circuits handler, hard_fork_name: {}",
|
||||
&config.low_version_circuit.hard_fork_name
|
||||
);
|
||||
AssetsDirEnvConfig::enable_first();
|
||||
BaseCircuitsHandler::new(
|
||||
proof_type,
|
||||
&config.low_version_circuit.params_path,
|
||||
&config.low_version_circuit.assets_path,
|
||||
geth_client,
|
||||
)
|
||||
.map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
|
||||
}
|
||||
m.insert(
|
||||
config.low_version_circuit.hard_fork_name.clone(),
|
||||
handler_builder,
|
||||
);
|
||||
|
||||
fn next_handler_builder(
|
||||
proof_type: ProofType,
|
||||
config: &Config,
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
) -> Result<Box<dyn CircuitsHandler>> {
|
||||
log::info!(
|
||||
"now init zk circuits handler, hard_fork_name: {}",
|
||||
&config.high_version_circuit.hard_fork_name
|
||||
);
|
||||
AssetsDirEnvConfig::enable_second();
|
||||
NextCircuitsHandler::new(
|
||||
proof_type,
|
||||
&config.high_version_circuit.params_path,
|
||||
&config.high_version_circuit.assets_path,
|
||||
geth_client,
|
||||
)
|
||||
.map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
|
||||
}
|
||||
|
||||
m.insert(
|
||||
config.high_version_circuit.hard_fork_name.clone(),
|
||||
next_handler_builder,
|
||||
);
|
||||
|
||||
let vks = CircuitsHandlerProvider::init_vks(proof_type, config, &m, geth_client.clone());
|
||||
|
||||
let provider = CircuitsHandlerProvider {
|
||||
proof_type,
|
||||
config,
|
||||
geth_client,
|
||||
circuits_handler_builder_map: m,
|
||||
current_hard_fork_name: None,
|
||||
current_circuit: None,
|
||||
vks,
|
||||
};
|
||||
|
||||
Ok(provider)
|
||||
}
|
||||
|
||||
pub fn get_circuits_handler(
|
||||
&mut self,
|
||||
hard_fork_name: &String,
|
||||
) -> Result<Rc<Box<dyn CircuitsHandler>>> {
|
||||
match &self.current_hard_fork_name {
|
||||
Some(name) if name == hard_fork_name => {
|
||||
log::info!("get circuits handler from cache");
|
||||
if let Some(handler) = &self.current_circuit {
|
||||
Ok(handler.clone())
|
||||
} else {
|
||||
log::error!("missing cached handler, there must be something wrong.");
|
||||
bail!("missing cached handler, there must be something wrong.")
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
log::info!(
|
||||
"failed to get circuits handler from cache, create a new one: {hard_fork_name}"
|
||||
);
|
||||
if let Some(builder) = self.circuits_handler_builder_map.get(hard_fork_name) {
|
||||
log::info!("building circuits handler for {hard_fork_name}");
|
||||
let handler = builder(self.proof_type, self.config, self.geth_client.clone())
|
||||
.expect("failed to build circuits handler");
|
||||
self.current_hard_fork_name = Some(hard_fork_name.clone());
|
||||
let rc_handler = Rc::new(handler);
|
||||
self.current_circuit = Some(rc_handler.clone());
|
||||
Ok(rc_handler)
|
||||
} else {
|
||||
log::error!("missing builder, there must be something wrong.");
|
||||
bail!("missing builder, there must be something wrong.")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn init_vks(
|
||||
proof_type: ProofType,
|
||||
config: &'a Config,
|
||||
circuits_handler_builder_map: &HashMap<HardForkName, CircuitsHandlerBuilder>,
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
) -> Vec<String> {
|
||||
circuits_handler_builder_map
|
||||
.iter()
|
||||
.map(|(hard_fork_name, build)| {
|
||||
let handler = build(proof_type, config, geth_client.clone())
|
||||
.expect("failed to build circuits handler");
|
||||
let vk = handler
|
||||
.get_vk(proof_type)
|
||||
.map_or("".to_string(), utils::encode_vk);
|
||||
log::info!("vk for {hard_fork_name} is {vk}");
|
||||
vk
|
||||
})
|
||||
.collect::<Vec<String>>()
|
||||
}
|
||||
|
||||
pub fn get_vks(&self) -> Vec<String> {
|
||||
self.vks.clone()
|
||||
}
|
||||
}
|
||||
@@ -1,197 +0,0 @@
|
||||
use super::CircuitsHandler;
|
||||
use crate::{geth_client::GethClient, types::ProofType};
|
||||
use anyhow::{bail, Ok, Result};
|
||||
use once_cell::sync::Lazy;
|
||||
use serde::Deserialize;
|
||||
|
||||
use crate::types::{CommonHash, Task};
|
||||
use prover::{
|
||||
aggregator::Prover as BatchProver, zkevm::Prover as ChunkProver, BlockTrace, ChunkHash,
|
||||
ChunkProof,
|
||||
};
|
||||
use std::{cell::RefCell, cmp::Ordering, env, rc::Rc};
|
||||
|
||||
// Only used for debugging.
|
||||
pub(crate) static OUTPUT_DIR: Lazy<Option<String>> =
|
||||
Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct BatchTaskDetail {
|
||||
pub chunk_infos: Vec<ChunkHash>,
|
||||
pub chunk_proofs: Vec<ChunkProof>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct ChunkTaskDetail {
|
||||
pub block_hashes: Vec<CommonHash>,
|
||||
}
|
||||
|
||||
fn get_block_number(block_trace: &BlockTrace) -> Option<u64> {
|
||||
block_trace.header.number.map(|n| n.as_u64())
|
||||
}
|
||||
|
||||
pub struct BaseCircuitsHandler {
|
||||
chunk_prover: Option<RefCell<ChunkProver>>,
|
||||
batch_prover: Option<RefCell<BatchProver>>,
|
||||
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
}
|
||||
|
||||
impl BaseCircuitsHandler {
|
||||
pub fn new(
|
||||
proof_type: ProofType,
|
||||
params_dir: &str,
|
||||
assets_dir: &str,
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
) -> Result<Self> {
|
||||
match proof_type {
|
||||
ProofType::Chunk => Ok(Self {
|
||||
chunk_prover: Some(RefCell::new(ChunkProver::from_dirs(params_dir, assets_dir))),
|
||||
batch_prover: None,
|
||||
geth_client,
|
||||
}),
|
||||
|
||||
ProofType::Batch => Ok(Self {
|
||||
batch_prover: Some(RefCell::new(BatchProver::from_dirs(params_dir, assets_dir))),
|
||||
chunk_prover: None,
|
||||
geth_client,
|
||||
}),
|
||||
_ => bail!("proof type invalid"),
|
||||
}
|
||||
}
|
||||
|
||||
fn gen_chunk_proof(&self, task: &crate::types::Task) -> Result<String> {
|
||||
let chunk_trace = self.gen_chunk_traces(task)?;
|
||||
if let Some(prover) = self.chunk_prover.as_ref() {
|
||||
let chunk_proof = prover.borrow_mut().gen_chunk_proof(
|
||||
chunk_trace,
|
||||
None,
|
||||
None,
|
||||
self.get_output_dir(),
|
||||
)?;
|
||||
|
||||
return serde_json::to_string(&chunk_proof).map_err(|e| anyhow::anyhow!(e));
|
||||
}
|
||||
unreachable!("please check errors in proof_type logic")
|
||||
}
|
||||
|
||||
fn gen_batch_proof(&self, task: &crate::types::Task) -> Result<String> {
|
||||
let chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)> =
|
||||
self.gen_chunk_hashes_proofs(task)?;
|
||||
let chunk_proofs: Vec<ChunkProof> =
|
||||
chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();
|
||||
|
||||
if let Some(prover) = self.batch_prover.as_ref() {
|
||||
let is_valid = prover.borrow_mut().check_chunk_proofs(&chunk_proofs);
|
||||
|
||||
if !is_valid {
|
||||
bail!("non-match chunk protocol, task-id: {}", &task.id)
|
||||
}
|
||||
let batch_proof = prover.borrow_mut().gen_agg_evm_proof(
|
||||
chunk_hashes_proofs,
|
||||
None,
|
||||
self.get_output_dir(),
|
||||
)?;
|
||||
|
||||
return serde_json::to_string(&batch_proof).map_err(|e| anyhow::anyhow!(e));
|
||||
}
|
||||
unreachable!("please check errors in proof_type logic")
|
||||
}
|
||||
|
||||
fn get_output_dir(&self) -> Option<&str> {
|
||||
OUTPUT_DIR.as_deref()
|
||||
}
|
||||
|
||||
fn gen_chunk_traces(&self, task: &Task) -> Result<Vec<BlockTrace>> {
|
||||
let chunk_task_detail: ChunkTaskDetail = serde_json::from_str(&task.task_data)?;
|
||||
self.get_sorted_traces_by_hashes(&chunk_task_detail.block_hashes)
|
||||
}
|
||||
|
||||
fn gen_chunk_hashes_proofs(&self, task: &Task) -> Result<Vec<(ChunkHash, ChunkProof)>> {
|
||||
let batch_task_detail: BatchTaskDetail = serde_json::from_str(&task.task_data)?;
|
||||
|
||||
Ok(batch_task_detail
|
||||
.chunk_infos
|
||||
.clone()
|
||||
.into_iter()
|
||||
.zip(batch_task_detail.chunk_proofs.clone())
|
||||
.collect())
|
||||
}
|
||||
|
||||
fn get_sorted_traces_by_hashes(&self, block_hashes: &[CommonHash]) -> Result<Vec<BlockTrace>> {
|
||||
if block_hashes.is_empty() {
|
||||
log::error!("[prover] failed to get sorted traces: block_hashes are empty");
|
||||
bail!("block_hashes are empty")
|
||||
}
|
||||
|
||||
let mut block_traces = Vec::new();
|
||||
for hash in block_hashes.iter() {
|
||||
let trace = self
|
||||
.geth_client
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.borrow_mut()
|
||||
.get_block_trace_by_hash(hash)?;
|
||||
block_traces.push(trace);
|
||||
}
|
||||
|
||||
block_traces.sort_by(|a, b| {
|
||||
if get_block_number(a).is_none() {
|
||||
Ordering::Less
|
||||
} else if get_block_number(b).is_none() {
|
||||
Ordering::Greater
|
||||
} else {
|
||||
get_block_number(a)
|
||||
.unwrap()
|
||||
.cmp(&get_block_number(b).unwrap())
|
||||
}
|
||||
});
|
||||
|
||||
let block_numbers: Vec<u64> = block_traces
|
||||
.iter()
|
||||
.map(|trace| get_block_number(trace).unwrap_or(0))
|
||||
.collect();
|
||||
let mut i = 0;
|
||||
while i < block_numbers.len() - 1 {
|
||||
if block_numbers[i] + 1 != block_numbers[i + 1] {
|
||||
log::error!(
|
||||
"[prover] block numbers are not continuous, got {} and {}",
|
||||
block_numbers[i],
|
||||
block_numbers[i + 1]
|
||||
);
|
||||
bail!(
|
||||
"block numbers are not continuous, got {} and {}",
|
||||
block_numbers[i],
|
||||
block_numbers[i + 1]
|
||||
)
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
Ok(block_traces)
|
||||
}
|
||||
}
|
||||
|
||||
impl CircuitsHandler for BaseCircuitsHandler {
|
||||
fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>> {
|
||||
match task_type {
|
||||
ProofType::Chunk => self
|
||||
.chunk_prover
|
||||
.as_ref()
|
||||
.and_then(|prover| prover.borrow().get_vk()),
|
||||
ProofType::Batch => self
|
||||
.batch_prover
|
||||
.as_ref()
|
||||
.and_then(|prover| prover.borrow().get_vk()),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_proof_data(&self, task_type: ProofType, task: &crate::types::Task) -> Result<String> {
|
||||
match task_type {
|
||||
ProofType::Chunk => self.gen_chunk_proof(task),
|
||||
ProofType::Batch => self.gen_batch_proof(task),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
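The sorting and continuity check in get_sorted_traces_by_hashes above boils down to requiring strictly consecutive block numbers once the traces are ordered. A minimal standalone sketch of that invariant over plain numbers:

fn block_numbers_are_continuous(numbers: &[u64]) -> bool {
    // Every adjacent pair must differ by exactly one, e.g. [100, 101, 102].
    numbers.windows(2).all(|pair| pair[0] + 1 == pair[1])
}

fn continuity_examples() {
    assert!(block_numbers_are_continuous(&[100, 101, 102]));
    assert!(!block_numbers_are_continuous(&[100, 102])); // a gap makes the handler bail
}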
@@ -1,197 +0,0 @@
|
||||
use super::CircuitsHandler;
|
||||
use crate::{geth_client::GethClient, types::ProofType};
|
||||
use anyhow::{bail, Context, Ok, Result};
|
||||
use serde::Deserialize;
|
||||
|
||||
use crate::types::{CommonHash, Task};
|
||||
use std::{cell::RefCell, cmp::Ordering, rc::Rc};
|
||||
|
||||
use prover_next::{
|
||||
aggregator::Prover as BatchProver, check_chunk_hashes, zkevm::Prover as ChunkProver,
|
||||
BatchProvingTask, BlockTrace, ChunkInfo, ChunkProof, ChunkProvingTask,
|
||||
};
|
||||
|
||||
use super::bernoulli::OUTPUT_DIR;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct BatchTaskDetail {
|
||||
pub chunk_infos: Vec<ChunkInfo>,
|
||||
pub chunk_proofs: Vec<ChunkProof>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct ChunkTaskDetail {
|
||||
pub block_hashes: Vec<CommonHash>,
|
||||
}
|
||||
|
||||
fn get_block_number(block_trace: &BlockTrace) -> Option<u64> {
|
||||
block_trace.header.number.map(|n| n.as_u64())
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct NextCircuitsHandler {
|
||||
chunk_prover: Option<RefCell<ChunkProver>>,
|
||||
batch_prover: Option<RefCell<BatchProver>>,
|
||||
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
}
|
||||
|
||||
impl NextCircuitsHandler {
|
||||
pub fn new(
|
||||
proof_type: ProofType,
|
||||
params_dir: &str,
|
||||
assets_dir: &str,
|
||||
geth_client: Option<Rc<RefCell<GethClient>>>,
|
||||
) -> Result<Self> {
|
||||
match proof_type {
|
||||
ProofType::Chunk => Ok(Self {
|
||||
chunk_prover: Some(RefCell::new(ChunkProver::from_dirs(params_dir, assets_dir))),
|
||||
batch_prover: None,
|
||||
geth_client,
|
||||
}),
|
||||
|
||||
ProofType::Batch => Ok(Self {
|
||||
batch_prover: Some(RefCell::new(BatchProver::from_dirs(params_dir, assets_dir))),
|
||||
chunk_prover: None,
|
||||
geth_client,
|
||||
}),
|
||||
_ => bail!("proof type invalid"),
|
||||
}
|
||||
}
|
||||
|
||||
fn gen_chunk_proof(&self, task: &crate::types::Task) -> Result<String> {
|
||||
let chunk_trace = self.gen_chunk_traces(task)?;
|
||||
if let Some(prover) = self.chunk_prover.as_ref() {
|
||||
let chunk = ChunkProvingTask::from(chunk_trace);
|
||||
|
||||
let chunk_proof =
|
||||
prover
|
||||
.borrow_mut()
|
||||
.gen_chunk_proof(chunk, None, None, self.get_output_dir())?;
|
||||
|
||||
return serde_json::to_string(&chunk_proof).map_err(|e| anyhow::anyhow!(e));
|
||||
}
|
||||
unreachable!("please check errors in proof_type logic")
|
||||
}
|
||||
|
||||
fn gen_batch_proof(&self, task: &crate::types::Task) -> Result<String> {
|
||||
if let Some(prover) = self.batch_prover.as_ref() {
|
||||
let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> =
|
||||
self.gen_chunk_hashes_proofs(task)?;
|
||||
let chunk_proofs: Vec<ChunkProof> =
|
||||
chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();
|
||||
|
||||
let is_valid = prover.borrow_mut().check_protocol_of_chunks(&chunk_proofs);
|
||||
|
||||
if !is_valid {
|
||||
bail!("non-match chunk protocol, task-id: {}", &task.id)
|
||||
}
|
||||
check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
|
||||
let batch = BatchProvingTask { chunk_proofs };
|
||||
let batch_proof =
|
||||
prover
|
||||
.borrow_mut()
|
||||
.gen_agg_evm_proof(batch, None, self.get_output_dir())?;
|
||||
|
||||
return serde_json::to_string(&batch_proof).map_err(|e| anyhow::anyhow!(e));
|
||||
}
|
||||
unreachable!("please check errors in proof_type logic")
|
||||
}
|
||||
|
||||
fn get_output_dir(&self) -> Option<&str> {
|
||||
OUTPUT_DIR.as_deref()
|
||||
}
|
||||
|
||||
fn gen_chunk_traces(&self, task: &Task) -> Result<Vec<BlockTrace>> {
|
||||
let chunk_task_detail: ChunkTaskDetail = serde_json::from_str(&task.task_data)?;
|
||||
self.get_sorted_traces_by_hashes(&chunk_task_detail.block_hashes)
|
||||
}
|
||||
|
||||
fn gen_chunk_hashes_proofs(&self, task: &Task) -> Result<Vec<(ChunkInfo, ChunkProof)>> {
|
||||
let batch_task_detail: BatchTaskDetail = serde_json::from_str(&task.task_data)?;
|
||||
|
||||
Ok(batch_task_detail
|
||||
.chunk_infos
|
||||
.clone()
|
||||
.into_iter()
|
||||
.zip(batch_task_detail.chunk_proofs.clone())
|
||||
.collect())
|
||||
}
|
||||
|
||||
fn get_sorted_traces_by_hashes(&self, block_hashes: &[CommonHash]) -> Result<Vec<BlockTrace>> {
|
||||
if block_hashes.is_empty() {
|
||||
log::error!("[prover] failed to get sorted traces: block_hashes are empty");
|
||||
bail!("block_hashes are empty")
|
||||
}
|
||||
|
||||
let mut block_traces = Vec::new();
|
||||
for hash in block_hashes.iter() {
|
||||
let trace = self
|
||||
.geth_client
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.borrow_mut()
|
||||
.get_block_trace_by_hash(hash)?;
|
||||
block_traces.push(trace);
|
||||
}
|
||||
|
||||
block_traces.sort_by(|a, b| {
|
||||
if get_block_number(a).is_none() {
|
||||
Ordering::Less
|
||||
} else if get_block_number(b).is_none() {
|
||||
Ordering::Greater
|
||||
} else {
|
||||
get_block_number(a)
|
||||
.unwrap()
|
||||
.cmp(&get_block_number(b).unwrap())
|
||||
}
|
||||
});
|
||||
|
||||
let block_numbers: Vec<u64> = block_traces
|
||||
.iter()
|
||||
.map(|trace| get_block_number(trace).unwrap_or(0))
|
||||
.collect();
|
||||
let mut i = 0;
|
||||
while i < block_numbers.len() - 1 {
|
||||
if block_numbers[i] + 1 != block_numbers[i + 1] {
|
||||
log::error!(
|
||||
"[prover] block numbers are not continuous, got {} and {}",
|
||||
block_numbers[i],
|
||||
block_numbers[i + 1]
|
||||
);
|
||||
bail!(
|
||||
"block numbers are not continuous, got {} and {}",
|
||||
block_numbers[i],
|
||||
block_numbers[i + 1]
|
||||
)
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
Ok(block_traces)
|
||||
}
|
||||
}
|
||||
|
||||
impl CircuitsHandler for NextCircuitsHandler {
|
||||
fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>> {
|
||||
match task_type {
|
||||
ProofType::Chunk => self
|
||||
.chunk_prover
|
||||
.as_ref()
|
||||
.and_then(|prover| prover.borrow().get_vk()),
|
||||
ProofType::Batch => self
|
||||
.batch_prover
|
||||
.as_ref()
|
||||
.and_then(|prover| prover.borrow().get_vk()),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_proof_data(&self, task_type: ProofType, task: &crate::types::Task) -> Result<String> {
|
||||
match task_type {
|
||||
ProofType::Chunk => self.gen_chunk_proof(task),
|
||||
ProofType::Batch => self.gen_batch_proof(task),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
prover/store/stack.go (new file, 107 lines)
@@ -0,0 +1,107 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"go.etcd.io/bbolt"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrEmpty empty error message
|
||||
ErrEmpty = errors.New("content is empty")
|
||||
)
|
||||
|
||||
// Stack is a first-input last-output db.
|
||||
type Stack struct {
|
||||
*bbolt.DB
|
||||
}
|
||||
|
||||
// ProvingTask is the value in stack.
|
||||
// It contains TaskMsg and proved times.
|
||||
type ProvingTask struct {
|
||||
Task *message.TaskMsg `json:"task"`
|
||||
// Times is how many times prover proved.
|
||||
Times int `json:"times"`
|
||||
}
|
||||
|
||||
var bucket = []byte("stack")
|
||||
|
||||
// NewStack new a Stack object.
|
||||
func NewStack(path string) (*Stack, error) {
|
||||
kvdb, err := bbolt.Open(path, 0666, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = kvdb.Update(func(tx *bbolt.Tx) error {
|
||||
_, err = tx.CreateBucketIfNotExists(bucket)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
log.Crit("init stack failed", "error", err)
|
||||
}
|
||||
return &Stack{DB: kvdb}, nil
|
||||
}
|
||||
|
||||
// Push appends the proving-task on the top of Stack.
|
||||
func (s *Stack) Push(task *ProvingTask) error {
|
||||
byt, err := json.Marshal(task)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := []byte(task.Task.ID)
|
||||
return s.Update(func(tx *bbolt.Tx) error {
|
||||
return tx.Bucket(bucket).Put(key, byt)
|
||||
})
|
||||
}
|
||||
|
||||
// Peek return the top element of the Stack.
|
||||
func (s *Stack) Peek() (*ProvingTask, error) {
|
||||
var value []byte
|
||||
if err := s.View(func(tx *bbolt.Tx) error {
|
||||
bu := tx.Bucket(bucket)
|
||||
c := bu.Cursor()
|
||||
_, value = c.Last()
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(value) == 0 {
|
||||
return nil, ErrEmpty
|
||||
}
|
||||
|
||||
traces := &ProvingTask{}
|
||||
err := json.Unmarshal(value, traces)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return traces, nil
|
||||
}
|
||||
|
||||
// Delete pops the proving-task on the top of Stack.
|
||||
func (s *Stack) Delete(taskID string) error {
|
||||
return s.Update(func(tx *bbolt.Tx) error {
|
||||
bu := tx.Bucket(bucket)
|
||||
return bu.Delete([]byte(taskID))
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateTimes updates the prover prove times of the proving task.
|
||||
func (s *Stack) UpdateTimes(task *ProvingTask, updateTimes int) error {
|
||||
task.Times = updateTimes
|
||||
byt, err := json.Marshal(task)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error marshaling task: %v", err)
|
||||
}
|
||||
key := []byte(task.Task.ID)
|
||||
return s.Update(func(tx *bbolt.Tx) error {
|
||||
bu := tx.Bucket(bucket)
|
||||
c := bu.Cursor()
|
||||
key, _ = c.Last()
|
||||
return bu.Put(key, byt)
|
||||
})
|
||||
}
|
||||
prover/store/stack_test.go (new file, 71 lines)
@@ -0,0 +1,71 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
)
|
||||
|
||||
func TestStack(t *testing.T) {
|
||||
// Create temp path
|
||||
path, err := os.MkdirTemp("/tmp/", "stack_db_test-")
|
||||
assert.NoError(t, err)
|
||||
defer os.RemoveAll(path)
|
||||
|
||||
// Create stack db instance
|
||||
s, err := NewStack(filepath.Join(path, "test-stack"))
|
||||
assert.NoError(t, err)
|
||||
defer s.Close()
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
taskUUID, uuidErr := uuid.NewRandom()
|
||||
assert.NoError(t, uuidErr)
|
||||
task := &ProvingTask{
|
||||
Task: &message.TaskMsg{
|
||||
UUID: taskUUID.String(),
|
||||
ID: strconv.Itoa(i),
|
||||
},
|
||||
Times: 0,
|
||||
}
|
||||
|
||||
err = s.Push(task)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
for i := 2; i >= 0; i-- {
|
||||
var peek *ProvingTask
|
||||
peek, err = s.Peek()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, strconv.Itoa(i), peek.Task.ID)
|
||||
err = s.Delete(strconv.Itoa(i))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// test times
|
||||
taskUUID, uuidErr := uuid.NewRandom()
|
||||
assert.NoError(t, uuidErr)
|
||||
task := &ProvingTask{
|
||||
Task: &message.TaskMsg{
|
||||
UUID: taskUUID.String(),
|
||||
ID: strconv.Itoa(1),
|
||||
},
|
||||
Times: 0,
|
||||
}
|
||||
err = s.Push(task)
|
||||
assert.NoError(t, err)
|
||||
peek, err := s.Peek()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 0, peek.Times)
|
||||
err = s.UpdateTimes(peek, 3)
|
||||
assert.NoError(t, err)
|
||||
|
||||
peek2, err := s.Peek()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 3, peek2.Times)
|
||||
}
|
||||
prover/utils/utils.go (new file, 56 lines)
@@ -0,0 +1,56 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
type ethClient interface {
|
||||
BlockNumber(ctx context.Context) (uint64, error)
|
||||
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
|
||||
}
|
||||
|
||||
// GetLatestConfirmedBlockNumber get confirmed block number by rpc.BlockNumber type.
|
||||
func GetLatestConfirmedBlockNumber(ctx context.Context, client ethClient, confirm rpc.BlockNumber) (uint64, error) {
|
||||
switch true {
|
||||
case confirm == rpc.SafeBlockNumber || confirm == rpc.FinalizedBlockNumber:
|
||||
var tag *big.Int
|
||||
if confirm == rpc.FinalizedBlockNumber {
|
||||
tag = big.NewInt(int64(rpc.FinalizedBlockNumber))
|
||||
} else {
|
||||
tag = big.NewInt(int64(rpc.SafeBlockNumber))
|
||||
}
|
||||
|
||||
header, err := client.HeaderByNumber(ctx, tag)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("client.HeaderByNumber failed: tag %v, err %v", tag, err)
|
||||
}
|
||||
if !header.Number.IsInt64() {
|
||||
return 0, fmt.Errorf("received invalid block confirm: %v", header.Number)
|
||||
}
|
||||
return header.Number.Uint64(), nil
|
||||
case confirm == rpc.LatestBlockNumber:
|
||||
number, err := client.BlockNumber(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return number, nil
|
||||
case confirm.Int64() >= 0: // If it's positive integer, consider it as a certain confirm value.
|
||||
number, err := client.BlockNumber(ctx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
cfmNum := uint64(confirm.Int64())
|
||||
|
||||
if number >= cfmNum {
|
||||
return number - cfmNum, nil
|
||||
}
|
||||
return 0, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown confirmation type: %v", confirm)
|
||||
}
|
||||
}
|
||||
@@ -2,14 +2,29 @@
|
||||
|
||||
IMAGE_VERSION=latest
|
||||
REPO_ROOT_DIR=./..
|
||||
LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
|
||||
SCROLL_LIB_PATH=/scroll/lib
|
||||
|
||||
mock_abi:
|
||||
cd .. && solc --evm-version cancun --bin --abi --optimize --overwrite -o ./build/bin ./rollup/mock_bridge/MockBridge.sol
|
||||
cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --bin=./build/bin/MockBridge.bin --abi=./build/bin/MockBridge.abi --pkg=mock_bridge --out=./rollup/mock_bridge/MockBridge.go
|
||||
|
||||
rollup_bins: ## Builds the Rollup bins.
|
||||
libzstd:
|
||||
sudo mkdir -p $(SCROLL_LIB_PATH)/
|
||||
sudo wget -O $(SCROLL_LIB_PATH)/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/v0.0.0-rc0-ubuntu20.04/libzktrie.so
|
||||
sudo wget -O $(SCROLL_LIB_PATH)/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/v0.0.0-rc0-ubuntu20.04/libscroll_zstd.so
|
||||
|
||||
rollup_bins: libzstd ## Builds the Rollup bins.
|
||||
export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(SCROLL_LIB_PATH) && \
|
||||
export CGO_LDFLAGS="-L$(SCROLL_LIB_PATH) -Wl,-rpath,$(SCROLL_LIB_PATH)" && \
|
||||
go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
|
||||
|
||||
export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(SCROLL_LIB_PATH) && \
|
||||
export CGO_LDFLAGS="-L$(SCROLL_LIB_PATH) -Wl,-rpath,$(SCROLL_LIB_PATH)" && \
|
||||
go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
|
||||
|
||||
export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(SCROLL_LIB_PATH) && \
|
||||
export CGO_LDFLAGS="-L$(SCROLL_LIB_PATH) -Wl,-rpath,$(SCROLL_LIB_PATH)" && \
|
||||
go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
|
||||
|
||||
event_watcher: ## Builds the event_watcher bin
|
||||
|
||||
@@ -101,7 +101,7 @@ var L2MessageQueueMetaData = &bind.MetaData{
|
||||
|
||||
// L1GasPriceOracleMetaData contains all meta data concerning the L1GasPriceOracle contract.
|
||||
var L1GasPriceOracleMetaData = &bind.MetaData{
|
||||
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"BlobScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"CommitScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BlobBaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"blobScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"commitScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BlobBaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFeeAndBlobBaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
|
||||
ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldWhitelist\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"UpdateWhitelist\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_overhead\",\"type\":\"uint256\"}],\"name\":\"setOverhead\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_scalar\",\"type\":\"uint256\"}],\"name\":\"setScalar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"updateWhitelist\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"whitelist\",\"outputs\":[{\"internalType\":\"contract 
IWhitelist\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]\n",
|
||||
}
|
||||
|
||||
// IL1ScrollMessengerL2MessageProof is an auto generated low-level Go binding around an user-defined struct.
|
||||
|
||||
@@ -70,15 +70,13 @@
      "max_l1_commit_calldata_size_per_chunk": 112345,
      "chunk_timeout_sec": 300,
      "max_row_consumption_per_chunk": 1048319,
      "gas_cost_increase_multiplier": 1.2,
      "max_uncompressed_batch_bytes_size": 634880
      "gas_cost_increase_multiplier": 1.2
    },
    "batch_proposer_config": {
      "max_l1_commit_gas_per_batch": 11234567,
      "max_l1_commit_calldata_size_per_batch": 112345,
      "batch_timeout_sec": 300,
      "gas_cost_increase_multiplier": 1.2,
      "max_uncompressed_batch_bytes_size": 634880
      "gas_cost_increase_multiplier": 1.2
    }
  },
  "db_config": {

@@ -10,7 +10,7 @@ require (
	github.com/go-resty/resty/v2 v2.7.0
	github.com/holiman/uint256 v1.2.4
	github.com/prometheus/client_golang v1.16.0
	github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7
	github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703
	github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
	github.com/smartystreets/goconvey v1.8.0
	github.com/stretchr/testify v1.9.0

@@ -236,8 +236,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7 h1:CDrPMqifvAVyYqu0x1J5qickVV0b51tApPnOwDYLESI=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703 h1:dcvPPyyfe3SocOBwgww3e1wcWjgF85kKDsohY4TXII0=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
|
||||
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.