Compare commits


72 Commits

Author SHA1 Message Date
Péter Garamvölgyi
f69fc7b5d5 enable isCurie on deployment 2024-07-09 14:21:14 +02:00
Péter Garamvölgyi
6e905352fc bump foundry version 2024-07-05 17:08:24 +02:00
Péter Garamvölgyi
0680c0d4d3 set Curie in genesis 2024-07-05 16:53:09 +02:00
Péter Garamvölgyi
29a3447d50 Merge branch 'develop' into feat-deterministic-deployment 2024-07-05 16:44:21 +02:00
Mengran Lan
16673e2b97 feat(bridge-history): change FinalizeBatch event logic (#1405)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: amoylan2 <amoylan2@users.noreply.github.com>
2024-07-02 00:40:04 +08:00
colin
a2536d5613 fix(coordinator): panic (#1407)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-06-29 06:01:44 +08:00
colin
5f31d28ced fix(coordinator): get empty task (#1406)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-06-28 23:17:35 +08:00
colin
eada1d05fe fix(Makefile): remove outdated prover in golang-related cmds (#1398) 2024-06-26 14:36:10 +08:00
sbaizet
2f39e37bc2 Ci coordinator api arm64 (#1383) 2024-06-24 09:17:22 +02:00
Zhang Zhuo
d454941c81 chore(libzkp): v0.11.4 (#1391)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2024-06-24 09:35:57 +08:00
Péter Garamvölgyi
977f5701fe fix REACT_APP_EXTERNAL_RPC_URI_L1 2024-06-18 20:13:07 +02:00
Mengran Lan
738c85759d feat(prover): add unit test for circuits e2e test (used in test_zkp project) (#1379) 2024-06-18 15:21:26 +08:00
Zhang Zhuo
27d627e318 feat(coordinator & prover): upgrade libzkp to v0.11.3 (#1384)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2024-06-18 10:39:32 +08:00
Kenn
8c3ecd395f chore: spelling fixes (#1378) 2024-06-18 09:31:20 +08:00
Hsiao_Jan
33016b1d5d fix(db): the function UpdateProvingStatusFailed proving_status determines the condition incorrectly (#1377)
Co-authored-by: xiaoranlu <xiaoranlu@tencent.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-06-14 10:01:33 +08:00
Péter Garamvölgyi
53d0389ba7 update base image to arm-friendly one 2024-06-13 17:26:56 +02:00
Zhang Zhuo
b824509773 chore(libzkp): upgrade to v0.11.1 (#1350)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: Mengran Lan <lanmengran@qq.com>
2024-06-13 18:00:26 +08:00
Ties
94ac1cd63f fix: set cancun as foundry evm version (#1369)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-06-13 15:31:26 +08:00
Mengran Lan
4ffb9e6c68 feat(prover): remove BlockTrace wrapper in geth_client.rs to make it more flexible for circuits (#1376) 2024-06-13 11:35:15 +08:00
Mengran Lan
874d3f2f8b feat: replace golang prover with rust prover (#1370)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-06-12 18:28:16 +08:00
Xi Lin
b0242c2938 feat(contracts): remove startBatchIndex check when updateVerifier in MultipleVersionRollupVerifier (#1372)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-06-12 17:05:08 +08:00
johnsonjie
6638c0b829 Update intermediate dockerfile (#1371) 2024-06-12 16:05:20 +08:00
johnsonjie
6dd09feff8 update intermediate docker workflow (#1296)
Co-authored-by: johnsonjie <xiaojie@scroll.io>
Co-authored-by: sebastien.baizet <sebastien.baizet@gmail.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-06-12 15:28:53 +08:00
Mengran Lan
75c81d5ce6 feat(prover): rewrite prover in rust (#1333)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: Zhang Zhuo <mycinbrin@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-06-12 14:25:37 +08:00
Péter Garamvölgyi
15969d34ce update frontend URLs 2024-06-10 16:32:53 +02:00
sbaizet
0c137d6b6c ci - Docker release bridgehistoryapi db cli (#1368) 2024-06-07 16:52:09 +02:00
Mengran Lan
c65cdfceb9 feat: chunk proof add row usages (#1367) 2024-06-06 16:45:58 +08:00
Mengran Lan
71ab2006fb fix(coordinator): get hard_fork_name is empty string (#1363) 2024-06-05 23:42:17 +08:00
georgehao
60a98fa876 tweak coordinator log (#1362) 2024-06-05 23:06:30 +08:00
colin
661b68cf86 fix(rollup-relayer): update da-codec dependency (#1360) 2024-06-05 17:33:55 +08:00
colin
6eea9195fc feat: turn libscroll_zstd dynamic lib into static (#1358)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-06-05 16:59:56 +08:00
Mengran Lan
e45838f3ac feat(coordinator): coordinator support multi-circuits prover (#1351)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2024-06-05 14:35:48 +08:00
petercover
acd1432d44 chore: fix function name in comment (#1335)
Signed-off-by: petercover <raowanxiang@outlook.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-06-05 10:35:54 +08:00
Péter Garamvölgyi
fc0cb979f0 set mock finalization params 2024-06-04 20:33:01 +02:00
marlowl
6b11e20ca6 fix(test): context propagation fix in InsertBatch function (#1346)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-06-02 21:06:32 +08:00
Xi Lin
f12e8e3baf feat(contracts): change MAX_COMMIT_SCALAR and MAX_BLOB_SCALAR to 1e18 (#1354) 2024-06-01 10:42:07 +08:00
Péter Garamvölgyi
2b5b82d466 init frontend config 2024-05-31 09:38:56 +02:00
colin
ba77a74743 feat(rollup-relayer): support codecv2 (#1298)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: Zhang Zhuo <mycinbrin@gmail.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-05-30 19:00:40 +08:00
colin
1ddfe57e5b feat(gas-oracle): add gas price update after Curie (#1344) 2024-05-29 17:55:06 +08:00
Péter Garamvölgyi
c48ae961a5 feat(contracts): accept batches with version > 1 (#1317)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Xi Lin <zimpha@gmail.com>
2024-05-29 11:38:32 +08:00
Xi Lin
7059ad0ed4 feat(contracts): update L1GasPriceOracle for Curie fork (#1343)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2024-05-28 13:36:56 +08:00
Péter Garamvölgyi
e2b87879da add chain-monitor and balance-checker config gen 2024-05-22 15:35:37 +02:00
Péter Garamvölgyi
4ab710359d update redis config values 2024-05-22 14:52:34 +02:00
Péter Garamvölgyi
f27d27cb8a add coordinator-config.json 2024-05-21 16:55:15 +02:00
Péter Garamvölgyi
0e28f28a6f Merge branch 'develop' into feat-deterministic-deployment 2024-05-21 16:38:43 +02:00
Péter Garamvölgyi
cbd3f54b6b update scripts 2024-05-13 16:29:34 +01:00
Péter Garamvölgyi
bc1cb1f86e add missing files to container 2024-05-13 16:27:40 +01:00
Péter Garamvölgyi
5497777c88 update dockerfile 2024-05-13 16:18:15 +01:00
Péter Garamvölgyi
39db5634e2 wip: generate other config files 2024-05-07 16:30:52 +08:00
Péter Garamvölgyi
2d14457c91 make envs more configurable 2024-05-03 15:23:58 +08:00
Péter Garamvölgyi
5b3a65b208 use legacy txs on L2 2024-04-29 23:10:21 +08:00
Péter Garamvölgyi
53b14bc090 fix genesis extradata 2024-04-29 20:09:15 +08:00
Péter Garamvölgyi
180f3c63fb ensure l1ChainId is serialized as string 2024-04-29 19:06:30 +08:00
Péter Garamvölgyi
5e59373cf4 add simple build script 2024-04-28 13:11:20 +08:00
Péter Garamvölgyi
1d856821df small improvements 2024-04-28 13:01:44 +08:00
Péter Garamvölgyi
98d2c333bd move files 2024-04-28 12:27:16 +08:00
Péter Garamvölgyi
d4bbb252de enable cache 2024-04-28 10:37:36 +08:00
Péter Garamvölgyi
7ac34b3196 fmt 2024-04-27 20:23:56 +08:00
Péter Garamvölgyi
dfab7315df predeploy DeterministicDeploymentProxy 2024-04-27 20:19:22 +08:00
Péter Garamvölgyi
623cf34fa3 improve dockerfiles, better error messages 2024-04-27 20:03:14 +08:00
Péter Garamvölgyi
ad5c47509f docker 2024-04-26 21:10:05 +08:00
Péter Garamvölgyi
03fdd8eb05 move files 2024-04-26 19:49:37 +08:00
Péter Garamvölgyi
421cd7e96d clean up 2024-04-26 19:30:42 +08:00
Péter Garamvölgyi
31a636a66b simplify contract address prediction 2024-04-26 18:28:22 +08:00
Péter Garamvölgyi
3c6c86eaad move configurations 2024-04-26 18:10:41 +08:00
Péter Garamvölgyi
9f7841a468 add GenerateGenesis 2024-04-26 17:52:49 +08:00
Péter Garamvölgyi
e56d9a9cff fmt 2024-04-26 16:47:46 +08:00
Péter Garamvölgyi
7b2228898b add GenerateGenesisAlloc script 2024-04-26 16:38:52 +08:00
Péter Garamvölgyi
491aa91369 use toml config file 2024-04-26 15:59:01 +08:00
Péter Garamvölgyi
6eb58c1097 rename param 2024-04-25 18:14:14 +08:00
Péter Garamvölgyi
5c42eb381b disable bytecode_hash 2024-04-25 18:12:42 +08:00
Péter Garamvölgyi
febc8a7a38 feat: add deterministic, multi-layer deployment script 2024-04-25 17:02:28 +08:00
153 changed files with 12732 additions and 3783 deletions

View File

@@ -0,0 +1,41 @@
name: Docker-coordinator-api-arm64
on:
workflow_dispatch:
inputs:
tag:
description: "tag of this image (suffix -arm64 is added automatically)"
required: true
type: string
jobs:
build-and-push-arm64-image:
runs-on: ubuntu-latest
strategy:
matrix:
arch:
- aarch64
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up QEMU
run: |
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
docker buildx create --name multiarch --driver docker-container --use
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build docker image
uses: docker/build-push-action@v2
with:
platforms: linux/arm64
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
push: true
tags: scrolltech/coordinator-api:${{inputs.tag}}-arm64
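As a usage note, this workflow can be dispatched from the command line; a hypothetical invocation (assumes an authenticated GitHub CLI, and v4.4.23 is just an example tag):

```bash
# Hypothetical dispatch via the GitHub CLI; the workflow itself appends
# the "-arm64" suffix to the tag, per the input description above.
gh workflow run Docker-coordinator-api-arm64 -f tag=v4.4.23
```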

View File

@@ -46,7 +46,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/event_watcher.Dockerfile
platforms: linux/amd64
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -91,7 +91,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/gas_oracle.Dockerfile
platforms: linux/amd64
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -136,7 +136,7 @@ jobs:
with:
context: .
file: ./build/dockerfiles/rollup_relayer.Dockerfile
platforms: linux/amd64
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -279,6 +279,51 @@ jobs:
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
bridgehistoryapi-db-cli:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ env.AWS_REGION }}
- name: Login to Amazon ECR
id: login-ecr
uses: aws-actions/amazon-ecr-login@v2
- name: check repo and create it if it does not exist
env:
REPOSITORY: bridgehistoryapi-db-cli
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Build and push
uses: docker/build-push-action@v3
env:
ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
REPOSITORY: bridgehistoryapi-db-cli
IMAGE_TAG: ${{ github.ref_name }}
with:
context: .
file: ./build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
coordinator-api:
runs-on: ubuntu-latest
steps:
@@ -316,7 +361,6 @@ jobs:
with:
context: .
file: ./build/dockerfiles/coordinator-api.Dockerfile
platforms: linux/amd64
push: true
tags: |
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
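The repository-bootstrap step in the new bridgehistoryapi-db-cli job relies on a terse shell idiom; a standalone sketch (REPO matches the job, the region value is only an example):

```bash
# Sketch of the `describe && : || create` idiom from the step above:
# the create command runs only when describe fails, i.e. when the ECR
# repository does not exist yet. AWS_REGION here is an example value.
REPO=bridgehistoryapi-db-cli
AWS_REGION=us-west-2
aws --region "$AWS_REGION" ecr describe-repositories --repository-names "$REPO" \
  && : \
  || aws --region "$AWS_REGION" ecr create-repository --repository-name "$REPO"
```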

View File

@@ -4,32 +4,65 @@ on:
workflow_dispatch:
inputs:
GO_VERSION:
description: 'Go version'
description: "Go version"
required: true
type: string
default: '1.21'
type: choice
options:
- "1.20"
- "1.21"
- "1.22"
- "1.23"
default: "1.21"
RUST_VERSION:
description: 'Rust toolchain version'
description: "Rust toolchain version"
required: true
type: string
default: 'nightly-2023-12-03'
type: choice
options:
- nightly-2023-12-03
- nightly-2022-12-10
default: "nightly-2023-12-03"
PYTHON_VERSION:
description: 'Python version'
description: "Python version"
required: false
type: string
default: '3.10'
type: choice
options:
- "3.10"
default: "3.10"
CUDA_VERSION:
description: 'Cuda version'
description: "Cuda version"
required: false
type: string
default: '11.7.1'
type: choice
options:
- "11.7.1"
- "12.2.2"
default: "11.7.1"
CARGO_CHEF_TAG:
description: "Cargo chef version"
required: true
default: "0.1.41"
type: choice
options:
- 0.1.41
BASE_IMAGE:
description: "which intermediate image you want to update"
required: true
default: "go-alpine-builder"
type: choice
options:
- cuda-go-rust-builder
- go-rust-builder
- go-alpine-builder
- rust-builder
- rust-alpine-builder
- go-rust-alpine-builder
- py-runner
defaults:
run:
working-directory: 'build/dockerfiles/intermediate'
working-directory: "build/dockerfiles/intermediate"
jobs:
build-and-publish-cuda-go-rust-builder:
build-and-publish-intermediate:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -43,177 +76,37 @@ jobs:
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: set tag env
run: |
if [ ${{github.event.inputs.BASE_IMAGE}} == "cuda-go-rust-builder" ]; then
echo "TAG=cuda-${{ github.event.inputs.CUDA_VERSION }}-go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-rust-builder" ]; then
echo "TAG=go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-alpine-builder" ]; then
echo "TAG=${{ github.event.inputs.GO_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "rust-builder" ]; then
echo "TAG=${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "rust-alpine-builder" ]; then
echo "TAG=${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "go-rust-alpine-builder" ]; then
echo "TAG=go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}" >> $GITHUB_ENV
elif [ ${{github.event.inputs.BASE_IMAGE}} == "py-runner" ]; then
echo "TAG=${{ github.event.inputs.PYTHON_VERSION }}" >> $GITHUB_ENV
else
echo "no BASE_IMAGE match"
fi
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/cuda-go-rust-builder.Dockerfile
tags: scrolltech/cuda-go-rust-builder:cuda-${{ github.event.inputs.CUDA_VERSION }}-go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-builder.Dockerfile
file: build/dockerfiles/intermediate/${{ github.event.inputs.BASE_IMAGE }}.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
tags: scrolltech/${{ github.event.inputs.BASE_IMAGE }}:${{ env.TAG }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-alpine-builder:${{ github.event.inputs.GO_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-rust-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/rust-alpine-builder:${{ github.event.inputs.RUST_VERSION }}
build-args: |
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-go-rust-alpine-builder:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/go-rust-alpine-builder.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/go-rust-alpine-builder:go-${{ github.event.inputs.GO_VERSION }}-rust-${{ github.event.inputs.RUST_VERSION }}
build-args: |
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
build-and-publish-py-runner:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build image
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
file: build/dockerfiles/intermediate/py-runner.Dockerfile
platforms: linux/amd64,linux/arm64
tags: scrolltech/py-runner:${{ github.event.inputs.PYTHON_VERSION }}
build-args: |
CUDA_VERSION: ${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION: ${{ github.event.inputs.GO_VERSION }}
RUST_VERSION: ${{ github.event.inputs.RUST_VERSION }}
CUDA_VERSION=${{ github.event.inputs.CUDA_VERSION }}
GO_VERSION=${{ github.event.inputs.GO_VERSION }}
RUST_VERSION=${{ github.event.inputs.RUST_VERSION }}
PYTHON_VERSION=${{ github.event.inputs.PYTHON_VERSION }}
CARGO_CHEF_TAG=${{ github.event.inputs.CARGO_CHEF_TAG }}
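To make the consolidated "set tag env" step concrete, a local sketch of the tag it derives for one BASE_IMAGE choice (input values assumed from the workflow's defaults):

```bash
# Reproduce the TAG derivation for BASE_IMAGE=cuda-go-rust-builder
# using the workflow's default inputs (values assumed, see options above).
CUDA_VERSION=11.7.1 GO_VERSION=1.21 RUST_VERSION=nightly-2023-12-03
TAG="cuda-${CUDA_VERSION}-go-${GO_VERSION}-rust-${RUST_VERSION}"
echo "scrolltech/cuda-go-rust-builder:${TAG}"
# -> scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03
```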

View File

@@ -25,78 +25,75 @@ defaults:
working-directory: 'prover'
jobs:
test:
if: github.event.pull_request.draft == false
skip_check:
runs-on: ubuntu-latest
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Test
run: |
go test -tags="mock_prover" -v -coverprofile=coverage.txt ./...
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
with:
flags: prover
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5
with:
cancel_others: 'true'
concurrent_skipping: 'same_content_newer'
paths_ignore: '["**/README.md"]'
fmt:
needs: [skip_check]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: rustfmt
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Cargo check
run: cargo check --all-features
- name: Cargo fmt
run: cargo fmt --all -- --check
clippy:
needs: [skip_check, fmt]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
components: clippy
- name: Cargo cache
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Run clippy
run: cargo clippy --all-features --all-targets -- -D warnings
compile:
if: github.event_name == 'push' # will only be triggered when pushing to main & staging & develop & alpha
needs: [skip_check, clippy]
if: |
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Test
run: |
make prover
check:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Lint
run: |
rm -rf $HOME/.cache/golangci-lint
make lint
goimports-lint:
if: github.event.pull_request.draft == false
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.21.x
- name: Checkout code
uses: actions/checkout@v4
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports
- run: goimports -local scroll-tech/prover/ -w .
- run: go mod tidy
# If there are any diffs from goimports or go mod tidy, fail.
- name: Verify no changes from goimports and go mod tidy
run: |
if [ -n "$(git status --porcelain)" ]; then
exit 1
fi
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly-2023-12-03
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
workspaces: "prover -> target"
- name: Test
run: |
make prover

View File

@@ -105,7 +105,7 @@ jobs:
- name: Test rollup packages
working-directory: 'rollup'
run: |
./run_test.sh
make test
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
env:

.gitignore vendored
View File

@@ -4,6 +4,8 @@ assets/seed
# Built binaries
build/bin
verifier.test
core.test
coverage.txt
*.integration.txt
@@ -20,3 +22,5 @@ coverage.txt
# misc
sftp-config.json
*~
target

View File

@@ -12,7 +12,6 @@ update:
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
@@ -21,7 +20,6 @@ lint: ## The code's format and security checks.
make -C common lint
make -C coordinator lint
make -C database lint
make -C prover lint
make -C bridge-history-api lint
fmt: ## format the code
@@ -30,7 +28,6 @@ fmt: ## format the code
cd $(PWD)/common/ && go mod tidy
cd $(PWD)/coordinator/ && go mod tidy
cd $(PWD)/database/ && go mod tidy
cd $(PWD)/prover/ && go mod tidy
cd $(PWD)/rollup/ && go mod tidy
cd $(PWD)/tests/integration-test/ && go mod tidy
@@ -38,7 +35,6 @@ fmt: ## format the code
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
goimports -local $(PWD)/database/ -w .
goimports -local $(PWD)/prover/ -w .
goimports -local $(PWD)/rollup/ -w .
goimports -local $(PWD)/tests/integration-test/ -w .

View File

@@ -46,19 +46,7 @@ make dev_docker
Run the tests using the following commands:
```bash
export LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
export SCROLL_LIB_PATH=/scroll/lib
sudo mkdir -p $SCROLL_LIB_PATH
sudo wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
sudo wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SCROLL_LIB_PATH
export CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...

View File

@@ -147,7 +147,7 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
}
if withdrawTrie.NextMessageNonce != l2WithdrawMessages[0].MessageNonce {
log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actuall next message nonce", l2WithdrawMessages[0].MessageNonce)
log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actual next message nonce", l2WithdrawMessages[0].MessageNonce)
return fmt.Errorf("nonce mismatch")
}

View File

@@ -273,7 +273,6 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeFinalized),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
})
}

View File

@@ -73,6 +73,9 @@ func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, block
// InsertOrUpdateBatchEvents inserts a new batch event or updates an existing one based on the BatchStatusType.
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent) error {
var maxFinalizedBatchIndex uint64
var containsFinalizedEvent bool
var maxL1BlockNumber uint64
for _, l1BatchEvent := range l1BatchEvents {
db := c.db
db = db.WithContext(ctx)
@@ -89,11 +92,13 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
return fmt.Errorf("failed to insert or ignore batch event, error: %w", err)
}
case btypes.BatchStatusTypeFinalized:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = btypes.BatchStatusTypeFinalized
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
containsFinalizedEvent = true
// get the maxFinalizedBatchIndex, which signals that all batches before it are finalized
if l1BatchEvent.BatchIndex > maxFinalizedBatchIndex {
maxFinalizedBatchIndex = l1BatchEvent.BatchIndex
}
if l1BatchEvent.L1BlockNumber > maxL1BlockNumber {
maxL1BlockNumber = l1BatchEvent.L1BlockNumber
}
case btypes.BatchStatusTypeReverted:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
@@ -108,6 +113,21 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
}
}
}
if containsFinalizedEvent {
db := c.db
db = db.WithContext(ctx)
db = db.Model(&BatchEvent{})
updateFields := make(map[string]interface{})
// After Darwin, a FinalizeBatch event signals that a range of batches is finalized,
// so the event no longer carries per-batch hash info; batch_index alone is enough to update finalized batches.
db = db.Where("batch_index <= ?", maxFinalizedBatchIndex)
db = db.Where("batch_status != ?", btypes.BatchStatusTypeFinalized)
updateFields["batch_status"] = btypes.BatchStatusTypeFinalized
updateFields["l1_block_number"] = maxL1BlockNumber
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
}
return nil
}

View File

@@ -13,7 +13,6 @@ RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .
RUN cargo build --release
RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
# Download Go dependencies
@@ -35,7 +34,6 @@ FROM base as builder
COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
# Pull coordinator into a second stage deploy alpine container

View File

@@ -1,6 +1,3 @@
ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
ARG SCROLL_LIB_PATH=/scroll/lib
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
@@ -18,38 +15,14 @@ RUN go mod download -x
# Build event_watcher
FROM base as builder
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
cd /src/rollup/cmd/event_watcher/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/event_watcher
# Pull event_watcher into a second stage deploy alpine container
FROM ubuntu:20.04
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/event_watcher /bin/
WORKDIR /app
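With libscroll_zstd now statically linked (commit 6eea9195fc), the downloaded .so files disappear from both build stages; a quick sanity check inside the final image (sketch, assumes a shell in the container — the same applies to the gas_oracle and rollup_relayer images below):

```bash
# After the switch to static linking, the binary should no longer
# reference the removed shared libraries; expect no matching lines.
ldd /bin/event_watcher | grep -E 'libscroll_zstd|libzktrie'
```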

View File

@@ -1,6 +1,3 @@
ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
ARG SCROLL_LIB_PATH=/scroll/lib
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
@@ -18,38 +15,14 @@ RUN go mod download -x
# Build gas_oracle
FROM base as builder
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
cd /src/rollup/cmd/gas_oracle/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/gas_oracle
# Pull gas_oracle into a second stage deploy alpine container
FROM ubuntu:20.04
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/gas_oracle /bin/
WORKDIR /app

View File

@@ -29,7 +29,14 @@ RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
# Install Go
ARG GO_VERSION
RUN rm -rf /usr/local/go
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-amd64.tar.gz
RUN rm go${GO_VERSION}.1.linux-amd64.tar.gz
RUN if [ "$(uname -m)" = "x86_64" ]; then \
echo amd64 >/tmp/arch; \
elif [ "$(uname -m)" = "aarch64" ]; then \
echo arm64 >/tmp/arch; \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
ENV PATH="/usr/local/go/bin:${PATH}"
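The uname -m mapping added here (and in the next builder) can also be written as a single case statement; a compact equivalent sketch, assuming GO_VERSION is set as in the Dockerfile:

```bash
# Compact equivalent of the arch-detection RUN chain above: map the
# machine architecture to the Go release tarball suffix, then download.
case "$(uname -m)" in
  x86_64)  ARCH=amd64 ;;
  aarch64) ARCH=arm64 ;;
  *) echo "Unsupported architecture" >&2; exit 1 ;;
esac
wget "https://go.dev/dl/go${GO_VERSION}.1.linux-${ARCH}.tar.gz"
```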

View File

@@ -25,7 +25,14 @@ RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
# Install Go
ARG GO_VERSION
RUN rm -rf /usr/local/go
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-amd64.tar.gz
RUN rm go${GO_VERSION}.1.linux-amd64.tar.gz
RUN if [ "$(uname -m)" = "x86_64" ]; then \
echo amd64 >/tmp/arch; \
elif [ "$(uname -m)" = "aarch64" ]; then \
echo arm64 >/tmp/arch; \
else \
echo "Unsupported architecture"; exit 1; \
fi
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
ENV PATH="/usr/local/go/bin:${PATH}"

View File

@@ -1,6 +1,3 @@
ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
ARG SCROLL_LIB_PATH=/scroll/lib
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
@@ -18,38 +15,14 @@ RUN go mod download -x
# Build rollup_relayer
FROM base as builder
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
cd /src/rollup/cmd/rollup_relayer/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy alpine container
FROM ubuntu:20.04
ARG LIBSCROLL_ZSTD_VERSION
ARG SCROLL_LIB_PATH
RUN mkdir -p $SCROLL_LIB_PATH
RUN apt-get -qq update && apt-get -qq install -y wget
RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/rollup_relayer /bin/
WORKDIR /app

common/libzkp/.gitignore vendored Normal file
View File

@@ -0,0 +1,4 @@
outputs
libzkp.so
test_zkp_test/
*log

common/libzkp/e2e-test.sh Normal file
View File

@@ -0,0 +1,44 @@
set -xeu
set -o pipefail
export CHAIN_ID=534352
export RUST_BACKTRACE=full
export RUST_LOG=debug
export RUST_MIN_STACK=100000000
export PROVER_OUTPUT_DIR=test_zkp_test
#export LD_LIBRARY_PATH=/:/usr/local/cuda/lib64
mkdir -p $PROVER_OUTPUT_DIR
REPO=$(realpath ../..)
function build_test_bins() {
cd impl
cargo build --release
ln -f -s $(realpath target/release/libzkp.so) $REPO/prover/core/lib
ln -f -s $(realpath target/release/libzkp.so) $REPO/coordinator/internal/logic/verifier/lib
cd $REPO/prover
go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
cd $REPO/coordinator
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd $REPO/common/libzkp
}
function build_test_bins_old() {
cd $REPO
cd prover
make libzkp
go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
cd ..
cd coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd ..
cd common/libzkp
}
build_test_bins
#rm -rf test_zkp_test/*
#rm -rf prover.log verifier.log
#$REPO/prover/core.test -test.v 2>&1 | tee prover.log
$REPO/coordinator/verifier.test -test.v 2>&1 | tee verifier.log
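A hypothetical invocation of this script (assumes a GPU host, since the test binaries are built with the gpu ffi tags):

```bash
cd common/libzkp && bash e2e-test.sh
```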

File diff suppressed because it is too large.

View File

@@ -25,7 +25,7 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i
[dependencies]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.4", default-features = false, features = ["parallel_syn", "scroll"] }
base64 = "0.13.0"
env_logger = "0.9.0"

View File

@@ -1,5 +1,8 @@
.PHONY: help fmt clippy test test-ci test-all
build:
@cargo build --release
fmt:
@cargo fmt --all -- --check

View File

@@ -8,9 +8,10 @@ use crate::{
use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
check_chunk_hashes,
consts::AGG_VK_FILENAME,
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, BlockTrace, ChunkHash, ChunkProof,
BatchProof, BatchProvingTask, BlockTrace, ChunkInfo, ChunkProof,
};
use snark_verifier_sdk::verify_evm_calldata;
use std::{cell::OnceCell, env, ptr::null};
@@ -79,7 +80,7 @@ pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *con
let prover_ref = PROVER.get().expect("failed to get reference to PROVER.");
let valid = prover_ref.check_chunk_proofs(&chunk_proofs);
let valid = prover_ref.check_protocol_of_chunks(&chunk_proofs);
Ok(valid)
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
@@ -108,7 +109,7 @@ pub unsafe extern "C" fn gen_batch_proof(
let chunk_hashes = c_char_to_vec(chunk_hashes);
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_hashes = serde_json::from_slice::<Vec<ChunkHash>>(&chunk_hashes)
let chunk_hashes = serde_json::from_slice::<Vec<ChunkInfo>>(&chunk_hashes)
.map_err(|e| format!("failed to deserialize chunk hashes: {e:?}"))?;
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
@@ -118,15 +119,19 @@ pub unsafe extern "C" fn gen_batch_proof(
chunk_hashes.len(), chunk_proofs.len()));
}
let chunk_hashes_proofs = chunk_hashes
let chunk_hashes_proofs: Vec<(_,_)> = chunk_hashes
.into_iter()
.zip(chunk_proofs)
.zip(chunk_proofs.clone())
.collect();
check_chunk_hashes("", &chunk_hashes_proofs).map_err(|e| format!("failed to check chunk info: {e:?}"))?;
let batch = BatchProvingTask {
chunk_proofs
};
let proof = PROVER
.get_mut()
.expect("failed to get mutable reference to PROVER.")
.gen_agg_evm_proof(chunk_hashes_proofs, None, OUTPUT_DIR.as_deref())
.gen_agg_evm_proof(batch, None, OUTPUT_DIR.as_deref())
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
@@ -157,19 +162,18 @@ pub unsafe extern "C" fn verify_batch_proof(
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
let fork_name_str = c_char_to_str(fork_name);
let fork_id = match fork_name_str {
"" => 0,
"shanghai" => 0,
"bernoulli" => 1,
"bernoulli" => 2,
"curie" => 3,
_ => {
log::warn!("unexpected fork_name {fork_name_str}, treated as bernoulli");
1
log::warn!("unexpected fork_name {fork_name_str}, treated as curie");
3
}
};
let verified = panic_catch(|| {
if fork_id == 0 {
// before upgrade#2(EIP4844)
if fork_id == 2 {
// before upgrade#3(DA Compression)
verify_evm_calldata(
include_bytes!("evm_verifier_fork_1.bin").to_vec(),
include_bytes!("plonk_verifier_0.10.3.bin").to_vec(),
proof.calldata(),
)
} else {
@@ -187,7 +191,7 @@ pub unsafe extern "C" fn block_traces_to_chunk_info(block_traces: *const c_char)
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces).unwrap();
let witness_block = chunk_trace_to_witness_block(block_traces).unwrap();
let chunk_info = ChunkHash::from_witness_block(&witness_block, false);
let chunk_info = ChunkInfo::from_witness_block(&witness_block, false);
let chunk_info_bytes = serde_json::to_vec(&chunk_info).unwrap();
vec_to_c_char(chunk_info_bytes)

View File

@@ -10,7 +10,7 @@ use prover::{
consts::CHUNK_VK_FILENAME,
utils::init_env_and_log,
zkevm::{Prover, Verifier},
BlockTrace, ChunkProof,
BlockTrace, ChunkProof, ChunkProvingTask,
};
use std::{cell::OnceCell, env, ptr::null};
@@ -71,11 +71,12 @@ pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const
let block_traces = c_char_to_vec(block_traces);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces)
.map_err(|e| format!("failed to deserialize block traces: {e:?}"))?;
let chunk = ChunkProvingTask::from(block_traces);
let proof = PROVER
.get_mut()
.expect("failed to get mutable reference to PROVER.")
.gen_chunk_proof(block_traces, None, None, OUTPUT_DIR.as_deref())
.gen_chunk_proof(chunk, None, None, OUTPUT_DIR.as_deref())
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))

Binary file not shown.

View File

@@ -183,6 +183,12 @@ type ChunkInfo struct {
TxBytes []byte `json:"tx_bytes"`
}
// SubCircuitRowUsage is tracing info added in v0.11.0rc8
type SubCircuitRowUsage struct {
Name string `json:"name"`
RowNumber uint64 `json:"row_number"`
}
// ChunkProof includes the proof info that are required for chunk verification and rollup.
type ChunkProof struct {
StorageTrace []byte `json:"storage_trace,omitempty"`
@@ -191,8 +197,9 @@ type ChunkProof struct {
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
GitVersion string `json:"git_version,omitempty"`
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
GitVersion string `json:"git_version,omitempty"`
RowUsages []SubCircuitRowUsage `json:"row_usages,omitempty"`
}
// BatchProof includes the proof info that are required for batch verification and rollup.
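With the new row_usages field serialized into chunk proofs, per-subcircuit usage can be inspected straight from a proof file; a sketch (the file name is a placeholder):

```bash
# List sub-circuit row usage from a chunk proof produced by a
# >= v0.11.0rc8 prover; chunk_proof.json is a hypothetical file name.
jq '.row_usages[] | {name, row_number}' chunk_proof.json
```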

View File

@@ -108,7 +108,7 @@ func TestProofDetailHash(t *testing.T) {
}
hash, err := proofDetail.Hash()
assert.NoError(t, err)
expectedHash := "d3b57cb84b0da8043373eeb3612806fb7248d6d1b6e089846ccf3ccce2d9f31c"
expectedHash := "01128ea9006601146ba80dbda959c96ebaefca463e78570e473a57d821db5ec1"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.4.8"
var tag = "v4.4.23"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -0,0 +1,47 @@
# Use the latest node Debian slim base image
# This makes installing yarn dependencies much easier
FROM node:20-bookworm-slim
# Switch to bash shell
SHELL ["/bin/bash", "-c"]
WORKDIR /root
# Install dependencies
RUN apt update
RUN apt install --yes curl bash coreutils git jq
# Download and run the Foundry installation script
RUN curl -L https://foundry.paradigm.xyz | bash
# Set the environment variables to ensure Foundry tools are in the PATH
ENV PATH="/root/.foundry/bin:${PATH}"
# Run foundryup to update Foundry
RUN foundryup -v nightly-56dbd20c7179570c53b6c17ff34daa7273a4ddae
# copy dependencies
COPY ./lib /contracts/lib
COPY ./node_modules/@openzeppelin /contracts/node_modules/@openzeppelin
# copy configurations
COPY foundry.toml /contracts/foundry.toml
COPY remappings.txt /contracts/remappings.txt
# copy source code
COPY ./src /contracts/src
COPY ./scripts /contracts/scripts
# compile contracts
ENV FOUNDRY_EVM_VERSION="cancun"
ENV FOUNDRY_BYTECODE_HASH="none"
WORKDIR /contracts
RUN forge build
# copy script configs
COPY ./docker/templates/config-contracts.toml /contracts/docker/templates/config-contracts.toml
COPY ./docker/scripts/deploy.sh /contracts/docker/scripts/deploy.sh
ENTRYPOINT ["/bin/bash", "/contracts/docker/scripts/deploy.sh"]

View File

@@ -0,0 +1,53 @@
# Use the latest node Debian slim base image
# This makes installing yarn dependencies much easier
FROM node:20-bookworm-slim
# Switch to bash shell
SHELL ["/bin/bash", "-c"]
WORKDIR /root
# Install dependencies
RUN apt update
RUN apt install --yes curl bash coreutils git jq
# Download and run the Foundry installation script
RUN curl -L https://foundry.paradigm.xyz | bash
# Set the environment variables to ensure Foundry tools are in the PATH
ENV PATH="/root/.foundry/bin:${PATH}"
# Run foundryup to update Foundry
RUN foundryup -v nightly-56dbd20c7179570c53b6c17ff34daa7273a4ddae
# copy dependencies
COPY ./lib /contracts/lib
COPY ./node_modules/@openzeppelin /contracts/node_modules/@openzeppelin
# copy configurations
COPY foundry.toml /contracts/foundry.toml
COPY remappings.txt /contracts/remappings.txt
# copy source code
COPY ./src /contracts/src
COPY ./scripts /contracts/scripts
# compile contracts
ENV FOUNDRY_EVM_VERSION="cancun"
ENV FOUNDRY_BYTECODE_HASH="none"
WORKDIR /contracts
RUN forge build
# copy script configs
COPY ./docker/templates/balance-checker-config.json /contracts/docker/templates/balance-checker-config.json
COPY ./docker/templates/bridge-history-config.json /contracts/docker/templates/bridge-history-config.json
COPY ./docker/templates/chain-monitor-config.json /contracts/docker/templates/chain-monitor-config.json
COPY ./docker/templates/config-contracts.toml /contracts/docker/templates/config-contracts.toml
COPY ./docker/templates/coordinator-config.json /contracts/docker/templates/coordinator-config.json
COPY ./docker/templates/genesis.json /contracts/docker/templates/genesis.json
COPY ./docker/templates/rollup-config.json /contracts/docker/templates/rollup-config.json
COPY ./docker/scripts/gen-configs.sh /contracts/docker/scripts/gen-configs.sh
ENTRYPOINT ["/bin/bash", "/contracts/docker/scripts/gen-configs.sh"]

View File

@@ -0,0 +1,75 @@
[general]
L1_RPC_ENDPOINT = "http://l1geth:8545"
L2_RPC_ENDPOINT = "http://l2geth:8545"
CHAIN_ID_L1 = 111111
CHAIN_ID_L2 = 222222
MAX_TX_IN_CHUNK = 100
MAX_BLOCK_IN_CHUNK = 100
MAX_L1_MESSAGE_GAS_LIMIT = 10000
L1_CONTRACT_DEPLOYMENT_BLOCK = 0
TEST_ENV_MOCK_FINALIZE_ENABLED = true
TEST_ENV_MOCK_FINALIZE_TIMEOUT_SEC = 3600
[accounts]
# note: for now we simply use Anvil's dev accounts
DEPLOYER_PRIVATE_KEY = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
OWNER_PRIVATE_KEY = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
L1_COMMIT_SENDER_PRIVATE_KEY = "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d"
L1_FINALIZE_SENDER_PRIVATE_KEY = "0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a"
L1_GAS_ORACLE_SENDER_PRIVATE_KEY = "0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6"
L2_GAS_ORACLE_SENDER_PRIVATE_KEY = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
DEPLOYER_ADDR = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"
OWNER_ADDR = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"
L1_COMMIT_SENDER_ADDR = "0x70997970C51812dc3A010C7d01b50e0d17dc79C8"
L1_FINALIZE_SENDER_ADDR = "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC"
L1_GAS_ORACLE_SENDER_ADDR = "0x90F79bf6EB2c4f870365E785982E1f101E93b906"
L2_GAS_ORACLE_SENDER_ADDR = "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"
L2GETH_SIGNER_0_ADDRESS = "0x756EA06BDEe36de11F22DCca45a31d8a178eF3c6"
[db]
SCROLL_DB_CONNECTION_STRING = "postgres://postgres:scroll2022@db:5432/scroll?sslmode=disable"
CHAIN_MONITOR_DB_CONNECTION_STRING = "postgres://postgres:scroll2022@db:5432/chain_monitor?sslmode=disable"
BRIDGE_HISTORY_DB_CONNECTION_STRING = "postgres://postgres:scroll2022@db:5432/bridge_history?sslmode=disable"
[genesis]
L2_MAX_ETH_SUPPLY = "226156424291633194186662080095093570025917938800079226639565593765455331328"
L2_DEPLOYER_INITIAL_BALANCE = 1000000000000000000
[contracts]
DEPLOYMENT_SALT = ""
# contracts deployed outside this script
L1_FEE_VAULT_ADDR = "0x0000000000000000000000000000000000000001"
L1_PLONK_VERIFIER_ADDR = "0x0000000000000000000000000000000000000001"
[contracts.overrides]
# L1_WETH = "0xfFf9976782d46CC05630D1f6eBAb18b2324d6B14"
L2_MESSAGE_QUEUE = "0x5300000000000000000000000000000000000000"
L1_GAS_PRICE_ORACLE = "0x5300000000000000000000000000000000000002"
L2_WHITELIST = "0x5300000000000000000000000000000000000003"
L2_WETH = "0x5300000000000000000000000000000000000004"
L2_TX_FEE_VAULT = "0x5300000000000000000000000000000000000005"
[coordinator]
COORDINATOR_JWT_SECRET_KEY = "e788b62d39254928a821ac1c76b274a8c835aa1e20ecfb6f50eb10e87847de44"

View File

@@ -0,0 +1,26 @@
#!/bin/sh
latest_commit=$(git log -1 --pretty=format:%h)
tag=${latest_commit:0:8}
echo "Using Docker image tag: $tag"
echo ""
docker build -f docker/Dockerfile.gen-configs -t scrolltech/scroll-stack-contracts:gen-configs-$tag-amd64 --platform linux/amd64 .
echo
echo "built scrolltech/scroll-stack-contracts:gen-configs-$tag-amd64"
echo
docker build -f docker/Dockerfile.gen-configs -t scrolltech/scroll-stack-contracts:gen-configs-$tag-arm64 --platform linux/arm64 .
echo
echo "built scrolltech/scroll-stack-contracts:gen-configs-$tag-arm64"
echo
docker build -f docker/Dockerfile.deploy -t scrolltech/scroll-stack-contracts:deploy-$tag-amd64 --platform linux/amd64 .
echo
echo "built scrolltech/scroll-stack-contracts:deploy-$tag-amd64"
echo
docker build -f docker/Dockerfile.deploy -t scrolltech/scroll-stack-contracts:deploy-$tag-arm64 --platform linux/arm64 .
echo
echo "built scrolltech/scroll-stack-contracts:deploy-$tag-arm64"
echo
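As a design note, the four per-arch builds above could also be collapsed with buildx; a sketch, assuming a docker-container builder is configured:

```bash
# Single multi-platform build per image instead of two per-arch builds
# (requires a buildx docker-container builder; pushes directly).
docker buildx build -f docker/Dockerfile.deploy \
  --platform linux/amd64,linux/arm64 \
  -t scrolltech/scroll-stack-contracts:deploy-$tag --push .
```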

View File

@@ -0,0 +1,46 @@
#!/bin/sh
export FOUNDRY_EVM_VERSION="cancun"
export FOUNDRY_BYTECODE_HASH="none"
if [ "${L1_RPC_ENDPOINT}" = "" ]; then
echo "L1_RPC_ENDPOINT is not set"
L1_RPC_ENDPOINT="http://host.docker.internal:8543"
fi
if [ "$L2_RPC_ENDPOINT" = "" ]; then
echo "L2_RPC_ENDPOINT is not set"
L2_RPC_ENDPOINT="http://host.docker.internal:8545"
fi
if [ "${L1_RPC_ENDPOINT}" = "" ]; then
echo "L1_RPC_ENDPOINT is not set"
L1_RPC_ENDPOINT="http://host.docker.internal:8543"
fi
if [ "${BATCH_SIZE}" = "" ]; then
BATCH_SIZE="100"
fi
echo "using L1_RPC_ENDPOINT = $L1_RPC_ENDPOINT"
echo "using L2_RPC_ENDPOINT = $L2_RPC_ENDPOINT"
# simulate L1
echo ""
echo "simulating on L1"
forge script scripts/foundry/DeployScroll.s.sol:DeployScroll --rpc-url "$L1_RPC_ENDPOINT" --sig "run(string,string)" "L1" "verify-config" || exit 1
# simulate L2
echo ""
echo "simulating on L2"
forge script scripts/foundry/DeployScroll.s.sol:DeployScroll --rpc-url "$L2_RPC_ENDPOINT" --sig "run(string,string)" "L2" "verify-config" --legacy || exit 1
# deploy L1
echo ""
echo "deploying on L1"
forge script scripts/foundry/DeployScroll.s.sol:DeployScroll --rpc-url "$L1_RPC_ENDPOINT" --batch-size "$BATCH_SIZE" --sig "run(string,string)" "L1" "verify-config" --broadcast || exit 1
# deploy L2
echo ""
echo "deploying on L2"
forge script scripts/foundry/DeployScroll.s.sol:DeployScroll --rpc-url "$L2_RPC_ENDPOINT" --batch-size "$BATCH_SIZE" --sig "run(string,string)" "L2" "verify-config" --broadcast --legacy || exit 1
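A hypothetical run of the deploy image that wraps this script (tag is a placeholder; the endpoints match the script's defaults):

```bash
docker run --rm \
  -e L1_RPC_ENDPOINT=http://host.docker.internal:8543 \
  -e L2_RPC_ENDPOINT=http://host.docker.internal:8545 \
  scrolltech/scroll-stack-contracts:deploy-<tag>
```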

View File

@@ -0,0 +1,33 @@
#!/bin/bash
echo ""
echo "generating config-contracts.toml"
forge script scripts/foundry/DeployScroll.s.sol:DeployScroll --sig "run(string,string)" "none" "write-config" || exit 1
echo ""
echo "generating genesis.json"
forge script scripts/foundry/DeployScroll.s.sol:GenerateGenesis || exit 1
echo ""
echo "generating rollup-config.json"
forge script scripts/foundry/DeployScroll.s.sol:GenerateRollupConfig || exit 1
echo ""
echo "generating coordinator-config.json"
forge script scripts/foundry/DeployScroll.s.sol:GenerateCoordinatorConfig || exit 1
echo ""
echo "generating chain-monitor-config.json"
forge script scripts/foundry/DeployScroll.s.sol:GenerateChainMonitorConfig || exit 1
echo ""
echo "generating bridge-history-config.json"
forge script scripts/foundry/DeployScroll.s.sol:GenerateBridgeHistoryConfig || exit 1
echo ""
echo "generating balance-checker-config.json"
forge script scripts/foundry/DeployScroll.s.sol:GenerateBalanceCheckerConfig || exit 1
echo ""
echo "generating .env.frontend"
forge script scripts/foundry/DeployScroll.s.sol:GenerateFrontendConfig || exit 1

View File

@@ -0,0 +1,24 @@
#!/bin/sh
latest_commit=$(git log -1 --pretty=format:%h)
tag=${latest_commit:0:8}
echo "Using Docker image tag: $tag"
echo ""
docker push scrolltech/scroll-stack-contracts:gen-configs-$tag-amd64
docker push scrolltech/scroll-stack-contracts:gen-configs-$tag-arm64
docker manifest create scrolltech/scroll-stack-contracts:gen-configs-$tag \
--amend scrolltech/scroll-stack-contracts:gen-configs-$tag-amd64 \
--amend scrolltech/scroll-stack-contracts:gen-configs-$tag-arm64
docker manifest push scrolltech/scroll-stack-contracts:gen-configs-$tag
docker push scrolltech/scroll-stack-contracts:deploy-$tag-amd64
docker push scrolltech/scroll-stack-contracts:deploy-$tag-arm64
docker manifest create scrolltech/scroll-stack-contracts:deploy-$tag \
--amend scrolltech/scroll-stack-contracts:deploy-$tag-amd64 \
--amend scrolltech/scroll-stack-contracts:deploy-$tag-arm64
docker manifest push scrolltech/scroll-stack-contracts:deploy-$tag
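After pushing, the combined manifests can be verified; a sketch:

```bash
# Both architectures should be listed for the combined tag.
docker manifest inspect scrolltech/scroll-stack-contracts:deploy-$tag \
  | grep -E '"architecture": "(amd64|arm64)"'
```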

View File

@@ -0,0 +1,42 @@
{
"addresses": [
{
"rpc_url": "${SCROLL_L1_RPC}",
"min_balance_ether": "10",
"address": "${L1_COMMIT_SENDER_ADDRESS}",
"name": "L1_COMMIT_SENDER"
},
{
"rpc_url": "${SCROLL_L1_RPC}",
"min_balance_ether": "10",
"address": "${L1_FINALIZE_SENDER_ADDRESS}",
"name": "L1_FINALIZE_SENDER"
},
{
"rpc_url": "${SCROLL_L1_RPC}",
"min_balance_ether": "1.1",
"address": "${L1_GAS_ORACLE_SENDER_ADDRESS}",
"name": "L1_GAS_ORACLE_SENDER"
},
{
"rpc_url": "${SCROLL_L1_RPC}",
"min_balance_ether": "0",
"address": "${L1_SCROLL_FEE_VAULT_ADDRESS}",
"name": "L1_SCROLL_FEE_VAULT"
},
{
"rpc_url": "${SCROLL_L2_RPC}",
"min_balance_ether": "1.1",
"address": "${L2_GAS_ORACLE_SENDER_ADDRESS}",
"name": "L2_GAS_ORACLE_SENDER"
},
{
"rpc_url": "${SCROLL_L2_RPC}",
"min_balance_ether": "0",
"address": "${L2_TX_FEE_VAULT_ADDR}",
"name": "L2_TX_FEE_VAULT"
}
],
"JOB_INTERVAL_SECS": 60,
"BIND_PORT": 8080
}
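
The config above leaves ${VAR} placeholders for the deployment tooling to fill in. A minimal Go sketch of rendering such a template with environment substitution, assuming envsubst-style expansion is what the stack performs (values are illustrative):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Illustrative values; the real deployment exports these elsewhere.
	os.Setenv("SCROLL_L1_RPC", "http://l1-node:8545")
	os.Setenv("L1_COMMIT_SENDER_ADDRESS", "0x0000000000000000000000000000000000000001")

	template := `{"rpc_url": "${SCROLL_L1_RPC}", "address": "${L1_COMMIT_SENDER_ADDRESS}"}`
	// os.ExpandEnv substitutes ${VAR} references from the environment.
	fmt.Println(os.ExpandEnv(template))
}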

View File

@@ -0,0 +1,56 @@
{
"L1": {
"confirmation": 0,
"endpoint": null,
"startHeight": 0,
"blockTime": 12,
"fetchLimit": 16,
"MessageQueueAddr": null,
"MessengerAddr": null,
"ScrollChainAddr": null,
"GatewayRouterAddr": null,
"ETHGatewayAddr": null,
"WETHGatewayAddr": null,
"StandardERC20GatewayAddr": null,
"CustomERC20GatewayAddr": null,
"ERC721GatewayAddr": null,
"ERC1155GatewayAddr": null,
"USDCGatewayAddr": "0x0000000000000000000000000000000000000000",
"LIDOGatewayAddr": "0x0000000000000000000000000000000000000000",
"DAIGatewayAddr": "0x0000000000000000000000000000000000000000",
"PufferGatewayAddr": "0x0000000000000000000000000000000000000000"
},
"L2": {
"confirmation": 0,
"endpoint": null,
"blockTime": 3,
"fetchLimit": 64,
"MessageQueueAddr": null,
"MessengerAddr": null,
"GatewayRouterAddr": null,
"ETHGatewayAddr": null,
"WETHGatewayAddr": null,
"StandardERC20GatewayAddr": null,
"CustomERC20GatewayAddr": null,
"ERC721GatewayAddr": null,
"ERC1155GatewayAddr": null,
"USDCGatewayAddr": "0x0000000000000000000000000000000000000000",
"LIDOGatewayAddr": "0x0000000000000000000000000000000000000000",
"DAIGatewayAddr": "0x0000000000000000000000000000000000000000",
"PufferGatewayAddr": "0x0000000000000000000000000000000000000000"
},
"db": {
"dsn": null,
"driverName": "postgres",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"redis": {
"address": "localhost:6379",
"username": "default",
"password": "",
"local": true,
"minIdleConns": 10,
"readTimeoutMs": 500
}
}

View File

@@ -0,0 +1,56 @@
{
"l1_config": {
"l1_url": null,
"confirm": "0x20",
"start_number": null,
"l1_contracts": {
"l1_gateways": {
"eth_gateway": null,
"weth_gateway": null,
"standard_erc20_gateway": null,
"custom_erc20_gateway": null,
"erc721_gateway": null,
"erc1155_gateway": null,
"dai_gateway": "0x0000000000000000000000000000000000000000",
"usdc_gateway": "0x0000000000000000000000000000000000000000",
"lido_gateway": "0x0000000000000000000000000000000000000000",
"puffer_gateway": "0x0000000000000000000000000000000000000000"
},
"scroll_messenger": null,
"message_queue": null,
"scroll_chain": null
},
"start_messenger_balance": null
},
"l2_config": {
"l2_url": null,
"confirm": "0x80",
"l2_contracts": {
"l2_gateways": {
"eth_gateway": null,
"weth_gateway": null,
"standard_erc20_gateway": null,
"custom_erc20_gateway": null,
"erc721_gateway": null,
"erc1155_gateway": null,
"dai_gateway": "0x0000000000000000000000000000000000000000",
"usdc_gateway": "0x0000000000000000000000000000000000000000",
"lido_gateway": "0x0000000000000000000000000000000000000000",
"puffer_gateway": "0x0000000000000000000000000000000000000000"
},
"scroll_messenger": null,
"message_queue": null
}
},
"slack_webhook_config": {
"webhook_url": "http://localhost:1234",
"worker_count": 5,
"worker_buffer_size": 1000
},
"db_config": {
"driver_name": "postgres",
"dsn": null,
"maxOpenNum": 100,
"maxIdleNum": 20
}
}

View File

@@ -0,0 +1,55 @@
L1_WETH_ADDR = ""
L1_PROXY_ADMIN_ADDR = ""
L1_PROXY_IMPLEMENTATION_PLACEHOLDER_ADDR = ""
L1_WHITELIST_ADDR = ""
L2_GAS_PRICE_ORACLE_IMPLEMENTATION_ADDR = ""
L2_GAS_PRICE_ORACLE_PROXY_ADDR = ""
L1_SCROLL_CHAIN_PROXY_ADDR = ""
L1_SCROLL_MESSENGER_PROXY_ADDR = ""
L1_ENFORCED_TX_GATEWAY_IMPLEMENTATION_ADDR = ""
L1_ENFORCED_TX_GATEWAY_PROXY_ADDR = ""
L1_ZKEVM_VERIFIER_V1_ADDR = ""
L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR = ""
L1_MESSAGE_QUEUE_IMPLEMENTATION_ADDR = ""
L1_MESSAGE_QUEUE_PROXY_ADDR = ""
L1_SCROLL_CHAIN_IMPLEMENTATION_ADDR = ""
L1_GATEWAY_ROUTER_IMPLEMENTATION_ADDR = ""
L1_GATEWAY_ROUTER_PROXY_ADDR = ""
L1_ETH_GATEWAY_PROXY_ADDR = ""
L1_WETH_GATEWAY_PROXY_ADDR = ""
L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR = ""
L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = ""
L1_ERC721_GATEWAY_PROXY_ADDR = ""
L1_ERC1155_GATEWAY_PROXY_ADDR = ""
L2_MESSAGE_QUEUE_ADDR = ""
L1_GAS_PRICE_ORACLE_ADDR = ""
L2_WHITELIST_ADDR = ""
L2_WETH_ADDR = ""
L2_TX_FEE_VAULT_ADDR = ""
L2_PROXY_ADMIN_ADDR = ""
L2_PROXY_IMPLEMENTATION_PLACEHOLDER_ADDR = ""
L2_SCROLL_MESSENGER_PROXY_ADDR = ""
L2_ETH_GATEWAY_PROXY_ADDR = ""
L2_WETH_GATEWAY_PROXY_ADDR = ""
L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR = ""
L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = ""
L2_ERC721_GATEWAY_PROXY_ADDR = ""
L2_ERC1155_GATEWAY_PROXY_ADDR = ""
L2_SCROLL_STANDARD_ERC20_ADDR = ""
L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR = ""
L1_SCROLL_MESSENGER_IMPLEMENTATION_ADDR = ""
L1_STANDARD_ERC20_GATEWAY_IMPLEMENTATION_ADDR = ""
L1_ETH_GATEWAY_IMPLEMENTATION_ADDR = ""
L1_WETH_GATEWAY_IMPLEMENTATION_ADDR = ""
L1_CUSTOM_ERC20_GATEWAY_IMPLEMENTATION_ADDR = ""
L1_ERC721_GATEWAY_IMPLEMENTATION_ADDR = ""
L1_ERC1155_GATEWAY_IMPLEMENTATION_ADDR = ""
L2_SCROLL_MESSENGER_IMPLEMENTATION_ADDR = ""
L2_GATEWAY_ROUTER_IMPLEMENTATION_ADDR = ""
L2_GATEWAY_ROUTER_PROXY_ADDR = ""
L2_STANDARD_ERC20_GATEWAY_IMPLEMENTATION_ADDR = ""
L2_ETH_GATEWAY_IMPLEMENTATION_ADDR = ""
L2_WETH_GATEWAY_IMPLEMENTATION_ADDR = ""
L2_CUSTOM_ERC20_GATEWAY_IMPLEMENTATION_ADDR = ""
L2_ERC721_GATEWAY_IMPLEMENTATION_ADDR = ""
L2_ERC1155_GATEWAY_IMPLEMENTATION_ADDR = ""

View File

@@ -0,0 +1,30 @@
{
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 100,
"chunk_collection_time_sec": 3600,
"batch_collection_time_sec": 600,
"verifier": {
"fork_name": "bernoulli",
"mock_mode": false,
"params_path": "/verifier/params",
"assets_path": "/verifier/assets"
},
"max_verifier_workers": 4,
"min_prover_version": "v4.3.41"
},
"db": {
"driver_name": "postgres",
"dsn": null,
"maxOpenNum": 200,
"maxIdleNum": 20
},
"l2": {
"chain_id": null
},
"auth": {
"secret": null,
"challenge_expire_duration_sec": 10,
"login_expire_duration_sec": 3600
}
}

View File

@@ -0,0 +1,48 @@
{
"config": {
"chainId": null,
"homesteadBlock": 0,
"eip150Block": 0,
"eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0,
"archimedesBlock": 0,
"shanghaiBlock": 0,
"bernoulliBlock": 0,
"curieBlock": 0,
"clique": {
"period": 3,
"epoch": 30000
},
"scroll": {
"useZktrie": true,
"maxTxPerBlock": null,
"maxTxPayloadBytesPerBlock": 122880,
"feeVaultAddress": null,
"l1Config": {
"l1ChainId": null,
"l1MessageQueueAddress": null,
"scrollChainAddress": null,
"numL1MessagesPerBlock": "10"
}
}
},
"nonce": "0x0",
"timestamp": null,
"extraData": null,
"gasLimit": "10000000",
"difficulty": "0x1",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x0000000000000000000000000000000000000000",
"alloc": {},
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"baseFeePerGas": null
}

View File

@@ -0,0 +1,87 @@
{
"l1_config": {
"confirmations": "0x0",
"endpoint": null,
"l1_message_queue_address": null,
"scroll_chain_address": null,
"start_height": 0,
"relayer_config": {
"gas_price_oracle_contract_address": null,
"sender_config": {
"endpoint": null,
"escalate_blocks": 100,
"escalate_multiple_num": 11,
"escalate_multiple_den": 10,
"max_gas_price": 10000000000000,
"tx_type": "LegacyTx",
"check_pending_time": 3,
"confirmations": "0x0"
},
"gas_oracle_config": {
"min_gas_price": 0,
"gas_price_diff": 50000,
"l1_base_fee_weight": 0.086,
"l1_blob_base_fee_weight": 0.030
},
"gas_oracle_sender_private_key": null
}
},
"l2_config": {
"confirmations": "0x0",
"endpoint": null,
"l2_message_queue_address": null,
"relayer_config": {
"rollup_contract_address": null,
"gas_price_oracle_contract_address": null,
"sender_config": {
"endpoint": null,
"escalate_blocks": 4,
"escalate_multiple_num": 12,
"escalate_multiple_den": 10,
"max_gas_price": 200000000000,
"max_blob_gas_price": 200000000000,
"tx_type": "DynamicFeeTx",
"check_pending_time": 10,
"confirmations": "0x0"
},
"gas_oracle_config": {
"min_gas_price": 0,
"gas_price_diff": 50000
},
"chain_monitor": {
"enabled": true,
"timeout": 3,
"try_times": 5,
"base_url": "http://chain-monitorv2:8080"
},
"enable_test_env_bypass_features": null,
"finalize_batch_without_proof_timeout_sec": null,
"gas_oracle_sender_private_key": null,
"commit_sender_private_key": null,
"finalize_sender_private_key": null,
"l1_commit_gas_limit_multiplier": 1.2
},
"chunk_proposer_config": {
"max_block_num_per_chunk": null,
"max_tx_num_per_chunk": null,
"max_l1_commit_gas_per_chunk": 5000000,
"max_l1_commit_calldata_size_per_chunk": 110000,
"chunk_timeout_sec": 2700,
"max_row_consumption_per_chunk": 1000000,
"gas_cost_increase_multiplier": 1.2
},
"batch_proposer_config": {
"max_chunk_num_per_batch": 15,
"max_l1_commit_gas_per_batch": 5000000,
"max_l1_commit_calldata_size_per_batch": 110000,
"batch_timeout_sec": 2700,
"gas_cost_increase_multiplier": 1.2
}
},
"db_config": {
"driver_name": "postgres",
"dsn": null,
"maxOpenNum": 50,
"maxIdleNum": 20
}
}

View File

@@ -38,7 +38,7 @@ Mapping from L2 ERC20 token address to corresponding L2ERC20Gateway.
function defaultERC20Gateway() external view returns (address)
```
The addess of default L2 ERC20 gateway, normally the L2StandardERC20Gateway contract.
The address of default L2 ERC20 gateway, normally the L2StandardERC20Gateway contract.

View File

@@ -882,17 +882,6 @@ error ErrorIncorrectPreviousStateRoot()
*Thrown when the previous state root doesn't match stored one.*
### ErrorInvalidBatchHeaderVersion
```solidity
error ErrorInvalidBatchHeaderVersion()
```
*Thrown when the batch header version is invalid.*
### ErrorLastL1MessageSkipped
```solidity
@@ -956,7 +945,7 @@ error ErrorRevertFinalizedBatch()
*Thrown when reverting a finialized batch.*
*Thrown when reverting a finalized batch.*
### ErrorRevertNotStartFromEnd
@@ -967,7 +956,7 @@ error ErrorRevertNotStartFromEnd()
*Thrown when the reverted batches are not in the ending of commited batch chain.*
*Thrown when the reverted batches are not in the ending of committed batch chain.*
### ErrorRevertZeroBatches

View File

@@ -7,15 +7,13 @@ libs = [] # a list of librar
remappings = [] # a list of remappings
libraries = [] # a list of deployed libraries to link against
cache = true # whether to cache builds or not
force = true # whether to ignore the cache (clean build)
# evm_version = 'london' # the evm version (by hardfork name)
force = false # whether to ignore the cache (clean build)
evm_version = 'cancun' # the evm version (by hardfork name)
solc_version = '0.8.24' # override for the solc version (setting this ignores `auto_detect_solc`)
optimizer = true # enable or disable the solc optimizer
optimizer_runs = 200 # the number of optimizer runs
verbosity = 2 # the verbosity of tests
ignored_error_codes = [] # a list of ignored solc error codes
fuzz_runs = 256 # the number of fuzz runs for tests
ffi = false # whether to enable ffi or not
sender = '0x00a329c0648769a73afac7f9381e08fb43dbea72' # the address of `msg.sender` in tests
tx_origin = '0x00a329c0648769a73afac7f9381e08fb43dbea72' # the address of `tx.origin` in tests
initial_balance = '0xffffffffffffffffffffffff' # the initial balance of the test contract
@@ -27,4 +25,15 @@ block_coinbase = '0x0000000000000000000000000000000000000000' # the address of `
block_timestamp = 0 # the value of `block.timestamp` in tests
block_difficulty = 0 # the value of `block.difficulty` in tests
# remove bytecode hash for reliable deterministic addresses
bytecode_hash = 'none'
# file system permissions
ffi = true
fs_permissions = [
{ access='read', path='./docker' },
{ access='read-write', path='./volume' },
]
gas_reports = ["L2GasPriceOracle"]
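
For context on `bytecode_hash = 'none'`: Solidity appends a metadata hash to contract init code, and CREATE2 addresses are derived from the keccak256 of that init code, so any metadata drift across builds would change the deployed address. A minimal Go sketch of the EIP-1014 derivation; the deployer, salt, and init code below are illustrative placeholders:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func keccak256(data ...[]byte) []byte {
	h := sha3.NewLegacyKeccak256()
	for _, d := range data {
		h.Write(d)
	}
	return h.Sum(nil)
}

// create2Address follows EIP-1014:
// address = keccak256(0xff ++ deployer ++ salt ++ keccak256(initCode))[12:]
// Because keccak256(initCode) is an input, stripping the metadata hash keeps
// the address stable across otherwise identical builds.
func create2Address(deployer [20]byte, salt [32]byte, initCode []byte) [20]byte {
	digest := keccak256([]byte{0xff}, deployer[:], salt[:], keccak256(initCode))
	var addr [20]byte
	copy(addr[:], digest[12:])
	return addr
}

func main() {
	var deployer [20]byte // illustrative zero deployer
	var salt [32]byte     // illustrative zero salt
	code := []byte{0x60, 0x00} // illustrative init code
	fmt.Printf("0x%x\n", create2Address(deployer, salt, code))
}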

View File

@@ -98,19 +98,6 @@ describe("ScrollChain.blob", async () => {
batchHeader0[25] = 1;
});
it("should revert when ErrorInvalidBatchHeaderVersion", async () => {
const header = new Uint8Array(121);
header[0] = 2;
await expect(chain.commitBatch(1, header, ["0x"], "0x")).to.revertedWithCustomError(
chain,
"ErrorInvalidBatchHeaderVersion"
);
await expect(chain.commitBatch(2, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
chain,
"ErrorInvalidBatchHeaderVersion"
);
});
it("should revert when ErrorNoBlobFound", async () => {
await expect(chain.commitBatch(1, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
chain,

View File

@@ -96,7 +96,7 @@ contract DeployL1BridgeContracts is Script {
address[] memory _verifiers = new address[](1);
_versions[0] = 0;
_verifiers[0] = address(zkEvmVerifierV1);
rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);
rollupVerifier = new MultipleVersionRollupVerifier(_versions, _verifiers);
logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
}

File diff suppressed because it is too large

View File

@@ -44,7 +44,7 @@ contract L1ScrollMessenger is ScrollMessengerBase, IL1ScrollMessenger {
struct ReplayState {
// The number of replayed times.
uint128 times;
// The queue index of lastest replayed one. If it is zero, it means the message has not been replayed.
// The queue index of latest replayed one. If it is zero, it means the message has not been replayed.
uint128 lastIndex;
}

View File

@@ -167,7 +167,7 @@ abstract contract L1ERC20Gateway is IL1ERC20Gateway, IMessageDropCallback, Scrol
/// @dev Internal function to do all the deposit operations.
///
/// @param _token The token to deposit.
/// @param _to The recipient address to recieve the token in L2.
/// @param _to The recipient address to receive the token in L2.
/// @param _amount The amount of token to deposit.
/// @param _data Optional data to forward to recipient's account.
/// @param _gasLimit Gas limit required to complete the deposit on L2.

View File

@@ -25,7 +25,7 @@ contract L1GatewayRouter is OwnableUpgradeable, IL1GatewayRouter {
/// @notice The address of L1ETHGateway.
address public ethGateway;
/// @notice The addess of default ERC20 gateway, normally the L1StandardERC20Gateway contract.
/// @notice The address of default ERC20 gateway, normally the L1StandardERC20Gateway contract.
address public defaultERC20Gateway;
/// @notice Mapping from ERC20 token address to corresponding L1ERC20Gateway.

View File

@@ -97,7 +97,7 @@ contract L1StandardERC20Gateway is L1ERC20Gateway {
/// @inheritdoc IL1ERC20Gateway
function getL2ERC20Address(address _l1Token) public view override returns (address) {
// In StandardERC20Gateway, all corresponding l2 tokens are depoyed by Create2 with salt,
// In StandardERC20Gateway, all corresponding l2 tokens are deployed by Create2 with salt,
// we can calculate the l2 address directly.
bytes32 _salt = keccak256(abi.encodePacked(counterpart, keccak256(abi.encodePacked(_l1Token))));

View File

@@ -4,7 +4,6 @@ pragma solidity =0.8.24;
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {IScrollChain} from "./IScrollChain.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";
@@ -28,19 +27,9 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/// @dev Thrown when the given address is `address(0)`.
error ErrorZeroAddress();
/// @dev Thrown when the given start batch index is finalized.
error ErrorStartBatchIndexFinalized();
/// @dev Thrown when the given start batch index is smaller than `latestVerifier.startBatchIndex`.
error ErrorStartBatchIndexTooSmall();
/*************
* Constants *
*************/
/// @notice The address of ScrollChain contract.
address public immutable scrollChain;
/***********
* Structs *
***********/
@@ -67,14 +56,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
* Constructor *
***************/
constructor(
address _scrollChain,
uint256[] memory _versions,
address[] memory _verifiers
) {
if (_scrollChain == address(0)) revert ErrorZeroAddress();
scrollChain = _scrollChain;
constructor(uint256[] memory _versions, address[] memory _verifiers) {
for (uint256 i = 0; i < _versions.length; i++) {
if (_verifiers[i] == address(0)) revert ErrorZeroAddress();
latestVerifier[_versions[i]].verifier = _verifiers[i];
@@ -157,8 +139,11 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
uint64 _startBatchIndex,
address _verifier
) external onlyOwner {
if (_startBatchIndex <= IScrollChain(scrollChain).lastFinalizedBatchIndex())
revert ErrorStartBatchIndexFinalized();
// We use the version to decide which verifier to use, and this function is
// controlled by a 7-day TimeLock. Since it is hard to predict `lastFinalizedBatchIndex` 7 days in advance,
// we removed this check to make verifier updates easier.
// if (_startBatchIndex <= IScrollChain(scrollChain).lastFinalizedBatchIndex())
// revert ErrorStartBatchIndexFinalized();
Verifier memory _latestVerifier = latestVerifier[_version];
if (_startBatchIndex < _latestVerifier.startBatchIndex) revert ErrorStartBatchIndexTooSmall();

View File

@@ -74,9 +74,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @dev Thrown when the previous state root doesn't match stored one.
error ErrorIncorrectPreviousStateRoot();
/// @dev Thrown when the batch header version is invalid.
error ErrorInvalidBatchHeaderVersion();
/// @dev Thrown when the last message is skipped.
error ErrorLastL1MessageSkipped();
@@ -92,10 +89,10 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @dev Thrown when the number of batches to revert is zero.
error ErrorRevertZeroBatches();
/// @dev Thrown when the reverted batches are not in the ending of commited batch chain.
/// @dev Thrown when the reverted batches are not in the ending of committed batch chain.
error ErrorRevertNotStartFromEnd();
/// @dev Thrown when reverting a finialized batch.
/// @dev Thrown when reverting a finalized batch.
error ErrorRevertFinalizedBatch();
/// @dev Thrown when the given state root is zero.
@@ -115,11 +112,12 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
*************/
/// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
address private constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
address internal constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
/// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
/// point evaluation precompile
uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
uint256 internal constant BLS_MODULUS =
52435875175126190479447740508185965837690552500527637822603658699938581184513;
/// @notice The chain id of the corresponding layer 2 chain.
uint64 public immutable layer2ChainId;
@@ -310,7 +308,10 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
batchPtr,
BatchHeaderV0Codec.BATCH_HEADER_FIXED_LENGTH + _skippedL1MessageBitmap.length
);
} else if (_version == 1) {
} else if (_version >= 1) {
// versions 1 and 2 both use ChunkCodecV1 and BatchHeaderV1Codec,
// but they use different blob encoding and different verifiers.
bytes32 blobVersionedHash;
(blobVersionedHash, _dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1(
_totalL1MessagesPoppedOverall,
@@ -322,7 +323,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
_totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
}
// store entries, the order matters
BatchHeaderV1Codec.storeVersion(batchPtr, 1);
BatchHeaderV1Codec.storeVersion(batchPtr, _version);
BatchHeaderV1Codec.storeBatchIndex(batchPtr, _batchIndex);
BatchHeaderV1Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
BatchHeaderV1Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
@@ -335,8 +336,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
batchPtr,
BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH + _skippedL1MessageBitmap.length
);
} else {
revert ErrorInvalidBatchHeaderVersion();
}
// check the length of bitmap
@@ -581,7 +580,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @param _chunks The list of chunks to commit.
/// @param _skippedL1MessageBitmap The bitmap indicates whether each L1 message is skipped or not.
/// @return _batchDataHash The computed data hash for the list of chunks.
/// @return _totalL1MessagesPoppedInBatch The total number of L1 messages poped in this batch, including skipped one.
/// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped one.
function _commitChunksV0(
uint256 _totalL1MessagesPoppedOverall,
bytes[] memory _chunks,
@@ -628,7 +627,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @param _skippedL1MessageBitmap The bitmap indicates whether each L1 message is skipped or not.
/// @return _blobVersionedHash The blob versioned hash for the blob carried in this transaction.
/// @return _batchDataHash The computed data hash for the list of chunks.
/// @return _totalL1MessagesPoppedInBatch The total number of L1 messages poped in this batch, including skipped one.
/// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped one.
function _commitChunksV1(
uint256 _totalL1MessagesPoppedOverall,
bytes[] memory _chunks,
@@ -711,18 +710,15 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
version := shr(248, calldataload(_batchHeader.offset))
}
// version should always be 0 or 1 in current code
uint256 _length;
if (version == 0) {
(batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader);
_batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length);
_batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr);
} else if (version == 1) {
} else if (version >= 1) {
(batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader);
_batchHash = BatchHeaderV1Codec.computeBatchHash(batchPtr, _length);
_batchIndex = BatchHeaderV1Codec.getBatchIndex(batchPtr);
} else {
revert ErrorInvalidBatchHeaderVersion();
}
// only check when genesis is imported
if (committedBatches[_batchIndex] != _batchHash && finalizedStateRoots[0] != bytes32(0)) {
@@ -954,7 +950,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @dev Internal function to pop finalized l1 messages.
/// @param bitmapPtr The memory offset of `skippedL1MessageBitmap`.
/// @param totalL1MessagePopped The total number of L1 messages poped in all batches including current batch.
/// @param totalL1MessagePopped The total number of L1 messages popped in all batches including current batch.
/// @param l1MessagePopped The number of L1 messages popped in current batch.
function _popL1Messages(
uint256 bitmapPtr,
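
The `version >= 1` branches above key the codec choice off the first byte of the batch header (`shr(248, calldataload(...))` reads exactly that byte). A standalone Go sketch of the same routing, assuming only what the diff shows: version 0 uses the V0 codec, and every later version shares the V1 header layout while differing in blob encoding and verifier.

package batchheader

import "errors"

var ErrHeaderTooShort = errors.New("batch header too short")

// DecodeVersion returns the header version: the first byte of the
// encoded batch header, matching shr(248, calldataload(offset)).
func DecodeVersion(header []byte) (byte, error) {
	if len(header) == 0 {
		return 0, ErrHeaderTooShort
	}
	return header[0], nil
}

// CodecForVersion mirrors the dispatch in the diff: versions 1 and 2
// both parse with BatchHeaderV1Codec.
func CodecForVersion(v byte) string {
	if v == 0 {
		return "BatchHeaderV0Codec"
	}
	return "BatchHeaderV1Codec"
}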

View File

@@ -21,7 +21,7 @@ contract L2GatewayRouter is OwnableUpgradeable, IL2GatewayRouter {
/// @notice The address of L2ETHGateway.
address public ethGateway;
/// @notice The addess of default L2 ERC20 gateway, normally the L2StandardERC20Gateway contract.
/// @notice The address of default L2 ERC20 gateway, normally the L2StandardERC20Gateway contract.
address public defaultERC20Gateway;
/// @notice Mapping from L2 ERC20 token address to corresponding L2ERC20Gateway.

View File

@@ -15,10 +15,22 @@ interface IL1GasPriceOracle {
/// @param scalar The current fee scalar updated.
event ScalarUpdated(uint256 scalar);
/// @notice Emitted when current commit fee scalar is updated.
/// @param scalar The current commit fee scalar updated.
event CommitScalarUpdated(uint256 scalar);
/// @notice Emitted when current blob fee scalar is updated.
/// @param scalar The current blob fee scalar updated.
event BlobScalarUpdated(uint256 scalar);
/// @notice Emitted when current l1 base fee is updated.
/// @param l1BaseFee The current l1 base fee updated.
event L1BaseFeeUpdated(uint256 l1BaseFee);
/// @notice Emitted when current l1 blob base fee is updated.
/// @param l1BlobBaseFee The current l1 blob base fee updated.
event L1BlobBaseFeeUpdated(uint256 l1BlobBaseFee);
/*************************
* Public View Functions *
*************************/
@@ -26,15 +38,24 @@ interface IL1GasPriceOracle {
/// @notice Return the current l1 fee overhead.
function overhead() external view returns (uint256);
/// @notice Return the current l1 fee scalar.
/// @notice Return the current l1 fee scalar before Curie fork.
function scalar() external view returns (uint256);
/// @notice Return the current l1 commit fee scalar.
function commitScalar() external view returns (uint256);
/// @notice Return the current l1 blob fee scalar.
function blobScalar() external view returns (uint256);
/// @notice Return the latest known l1 base fee.
function l1BaseFee() external view returns (uint256);
/// @notice Return the latest known l1 blob base fee.
function l1BlobBaseFee() external view returns (uint256);
/// @notice Computes the L1 portion of the fee based on the size of the rlp encoded input
/// transaction, the current L1 base fee, and the various dynamic parameters.
/// @param data Unsigned fully RLP-encoded transaction to get the L1 fee for.
/// @param data Signed fully RLP-encoded transaction to get the L1 fee for.
/// @return L1 fee that should be paid for the tx
function getL1Fee(bytes memory data) external view returns (uint256);
@@ -42,7 +63,7 @@ interface IL1GasPriceOracle {
/// represents the per-transaction gas overhead of posting the transaction and state
/// roots to L1. Adds 74 bytes of padding to account for the fact that the input does
/// not have a signature.
/// @param data Unsigned fully RLP-encoded transaction to get the L1 gas for.
/// @param data Signed fully RLP-encoded transaction to get the L1 gas for.
/// @return Amount of L1 gas used to publish the transaction.
function getL1GasUsed(bytes memory data) external view returns (uint256);
@@ -53,4 +74,9 @@ interface IL1GasPriceOracle {
/// @notice Allows whitelisted caller to modify the l1 base fee.
/// @param _l1BaseFee New l1 base fee.
function setL1BaseFee(uint256 _l1BaseFee) external;
/// @notice Allows whitelisted caller to modify the l1 base fee.
/// @param _l1BaseFee New l1 base fee.
/// @param _l1BlobBaseFee New l1 blob base fee.
function setL1BaseFeeAndBlobBaseFee(uint256 _l1BaseFee, uint256 _l1BlobBaseFee) external;
}
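
To make the pre-Curie path concrete: `getL1Fee` is gas-used times `l1BaseFee` times `scalar`, divided by `PRECISION` (1e9, per the tests later in this diff). A hedged Go sketch follows; the 4/16 gas-per-byte calldata costs and the four reserved non-zero bytes are assumptions drawn from the oracle's own comments, not a verified port of `_getL1GasUsedBeforeCurie`.

package oracle

const precision uint64 = 1_000_000_000 // PRECISION = 1e9

// l1GasUsedBeforeCurie approximates the pre-Curie gas estimate:
// 4 gas per zero byte, 16 per non-zero byte (assumed), plus four
// reserved non-zero bytes for the payload length, plus the overhead.
func l1GasUsedBeforeCurie(data []byte, overhead uint64) uint64 {
	var total uint64
	for _, b := range data {
		if b == 0 {
			total += 4
		} else {
			total += 16
		}
	}
	return total + 4*16 + overhead
}

// l1FeeBeforeCurie mirrors fee = gasUsed * l1BaseFee * scalar / PRECISION.
// uint64 is fine for small illustrative inputs; the contract uses 256-bit math.
func l1FeeBeforeCurie(data []byte, overhead, l1BaseFee, scalar uint64) uint64 {
	return l1GasUsedBeforeCurie(data, overhead) * l1BaseFee * scalar / precision
}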

View File

@@ -242,7 +242,7 @@ contract L1BlockContainer is OwnableBase, IL1BlockContainer {
let _computedBlockHash := keccak256(memPtr, headerPayloadLength)
require(eq(_blockHash, _computedBlockHash), "Block hash mismatch")
// load 16 vaules
// load 16 values
for {
let i := 0
} lt(i, 16) {

View File

@@ -17,6 +17,28 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
/// @param _newWhitelist The address of new whitelist contract.
event UpdateWhitelist(address _oldWhitelist, address _newWhitelist);
/**********
* Errors *
**********/
/// @dev Thrown when the blob fee scalar exceeds `MAX_BLOB_SCALAR`.
error ErrExceedMaxBlobScalar();
/// @dev Thrown when the commit fee scalar exceeds `MAX_COMMIT_SCALAR`.
error ErrExceedMaxCommitScalar();
/// @dev Thrown when the l1 fee overhead exceeds `MAX_OVERHEAD`.
error ErrExceedMaxOverhead();
/// @dev Thrown when the l1 fee scalar exceeds `MAX_SCALAR`.
error ErrExceedMaxScalar();
/// @dev Thrown when the caller is not whitelisted.
error ErrCallerNotWhitelisted();
/// @dev Thrown when enabling the Curie fork after it is already enabled.
error ErrAlreadyInCurieFork();
/*************
* Constants *
*************/
@@ -28,9 +50,25 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
/// Computed based on current l1 block gas limit.
uint256 private constant MAX_OVERHEAD = 30000000 / 16;
/// @dev The maximum possible l1 fee scale.
/// @dev The maximum possible l1 fee scale before Curie.
/// x1000 should be enough.
uint256 private constant MAX_SCALE = 1000 * PRECISION;
uint256 private constant MAX_SCALAR = 1000 * PRECISION;
/// @dev The maximum possible l1 commit fee scalar after Curie.
/// We derive the commit scalar by
/// ```
/// commit_scalar = commit_gas_per_tx * fluctuation_multiplier * 1e9
/// ```
/// So, the value should not exceed 10^9 * 1e9 normally.
uint256 private constant MAX_COMMIT_SCALAR = 10**9 * PRECISION;
/// @dev The maximum possible l1 blob fee scalar after Curie.
/// We derive the blob scalar by
/// ```
/// blob_scalar = fluctuation_multiplier / compression_ratio / blob_util_ratio * 1e9
/// ```
/// So, the value should not exceed 10^9 * 1e9 normally.
uint256 private constant MAX_BLOB_SCALAR = 10**9 * PRECISION;
/*************
* Variables *
@@ -48,12 +86,36 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
/// @notice The address of whitelist contract.
IWhitelist public whitelist;
/// @inheritdoc IL1GasPriceOracle
uint256 public override l1BlobBaseFee;
/// @inheritdoc IL1GasPriceOracle
uint256 public override commitScalar;
/// @inheritdoc IL1GasPriceOracle
uint256 public override blobScalar;
/// @notice Indicates whether the network has gone through the Curie upgrade.
bool public isCurie;
/*************
* Modifiers *
*************/
modifier onlyWhitelistedSender() {
if (!whitelist.isSenderAllowed(msg.sender)) revert ErrCallerNotWhitelisted();
_;
}
/***************
* Constructor *
***************/
constructor(address _owner) {
_transferOwnership(_owner);
// by default we enable Curie from genesis
isCurie = true;
}
/*************************
@@ -62,15 +124,117 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
/// @inheritdoc IL1GasPriceOracle
function getL1Fee(bytes memory _data) external view override returns (uint256) {
uint256 _l1GasUsed = getL1GasUsed(_data);
uint256 _l1Fee = _l1GasUsed * l1BaseFee;
return (_l1Fee * scalar) / PRECISION;
if (isCurie) {
return _getL1FeeCurie(_data);
} else {
return _getL1FeeBeforeCurie(_data);
}
}
/// @inheritdoc IL1GasPriceOracle
/// @dev The `_data` is the signed RLP-encoded transaction. We also reserve an additional
/// 4 non-zero bytes to store the length of the RLP-encoded transaction.
function getL1GasUsed(bytes memory _data) public view override returns (uint256) {
if (isCurie) {
// It is near zero since all transaction data is put into blobs.
return 0;
} else {
return _getL1GasUsedBeforeCurie(_data);
}
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IL1GasPriceOracle
function setL1BaseFee(uint256 _l1BaseFee) external override onlyWhitelistedSender {
l1BaseFee = _l1BaseFee;
emit L1BaseFeeUpdated(_l1BaseFee);
}
/// @inheritdoc IL1GasPriceOracle
function setL1BaseFeeAndBlobBaseFee(uint256 _l1BaseFee, uint256 _l1BlobBaseFee)
external
override
onlyWhitelistedSender
{
l1BaseFee = _l1BaseFee;
l1BlobBaseFee = _l1BlobBaseFee;
emit L1BaseFeeUpdated(_l1BaseFee);
emit L1BlobBaseFeeUpdated(_l1BlobBaseFee);
}
/************************
* Restricted Functions *
************************/
/// @notice Allows the owner to modify the overhead.
/// @param _overhead New overhead
function setOverhead(uint256 _overhead) external onlyOwner {
if (_overhead > MAX_OVERHEAD) revert ErrExceedMaxOverhead();
overhead = _overhead;
emit OverheadUpdated(_overhead);
}
/// Allows the owner to modify the scalar.
/// @param _scalar New scalar
function setScalar(uint256 _scalar) external onlyOwner {
if (_scalar > MAX_SCALAR) revert ErrExceedMaxScalar();
scalar = _scalar;
emit ScalarUpdated(_scalar);
}
/// Allows the owner to modify the commit scalar.
/// @param _scalar New scalar
function setCommitScalar(uint256 _scalar) external onlyOwner {
if (_scalar > MAX_COMMIT_SCALAR) revert ErrExceedMaxCommitScalar();
commitScalar = _scalar;
emit CommitScalarUpdated(_scalar);
}
/// Allows the owner to modify the blob scalar.
/// @param _scalar New scalar
function setBlobScalar(uint256 _scalar) external onlyOwner {
if (_scalar > MAX_BLOB_SCALAR) revert ErrExceedMaxBlobScalar();
blobScalar = _scalar;
emit BlobScalarUpdated(_scalar);
}
/// @notice Update whitelist contract.
/// @dev This function can only be called by the contract owner.
/// @param _newWhitelist The address of new whitelist contract.
function updateWhitelist(address _newWhitelist) external onlyOwner {
address _oldWhitelist = address(whitelist);
whitelist = IWhitelist(_newWhitelist);
emit UpdateWhitelist(_oldWhitelist, _newWhitelist);
}
/// @notice Enable the Curie fork (callable by contract owner).
///
/// @dev Since this is a predeploy contract, we set the storage slot directly during the hard fork
/// to avoid external owner operations.
/// We keep this function for easy unit testing.
function enableCurie() external onlyOwner {
if (isCurie) revert ErrAlreadyInCurieFork();
isCurie = true;
}
/**********************
* Internal Functions *
**********************/
/// @dev Internal function to compute the amount of L1 gas used for a transaction before the Curie fork.
/// The `_data` is the signed RLP-encoded transaction. We also reserve an additional
/// 4 non-zero bytes to store the length of the RLP-encoded transaction.
/// @param _data Signed fully RLP-encoded transaction to get the L1 gas for.
/// @return Amount of L1 gas used to publish the transaction.
function _getL1GasUsedBeforeCurie(bytes memory _data) private view returns (uint256) {
uint256 _total = 0;
uint256 _length = _data.length;
unchecked {
@@ -85,48 +249,22 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
}
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IL1GasPriceOracle
function setL1BaseFee(uint256 _l1BaseFee) external override {
require(whitelist.isSenderAllowed(msg.sender), "Not whitelisted sender");
l1BaseFee = _l1BaseFee;
emit L1BaseFeeUpdated(_l1BaseFee);
/// @dev Internal function to compute the L1 portion of the fee based on the size of the rlp encoded input
/// transaction, the current L1 base fee, and the various dynamic parameters, before Curie fork.
/// @param _data Signed fully RLP-encoded transaction to get the L1 fee for.
/// @return L1 fee that should be paid for the tx
function _getL1FeeBeforeCurie(bytes memory _data) private view returns (uint256) {
uint256 _l1GasUsed = _getL1GasUsedBeforeCurie(_data);
uint256 _l1Fee = _l1GasUsed * l1BaseFee;
return (_l1Fee * scalar) / PRECISION;
}
/************************
* Restricted Functions *
************************/
/// @notice Allows the owner to modify the overhead.
/// @param _overhead New overhead
function setOverhead(uint256 _overhead) external onlyOwner {
require(_overhead <= MAX_OVERHEAD, "exceed maximum overhead");
overhead = _overhead;
emit OverheadUpdated(_overhead);
}
/// Allows the owner to modify the scalar.
/// @param _scalar New scalar
function setScalar(uint256 _scalar) external onlyOwner {
require(_scalar <= MAX_SCALE, "exceed maximum scale");
scalar = _scalar;
emit ScalarUpdated(_scalar);
}
/// @notice Update whitelist contract.
/// @dev This function can only be called by the contract owner.
/// @param _newWhitelist The address of new whitelist contract.
function updateWhitelist(address _newWhitelist) external onlyOwner {
address _oldWhitelist = address(whitelist);
whitelist = IWhitelist(_newWhitelist);
emit UpdateWhitelist(_oldWhitelist, _newWhitelist);
/// @dev Internal function to compute the L1 portion of the fee based on the size of the rlp encoded input
/// transaction, the current L1 base fee, and the various dynamic parameters, after Curie fork.
/// @param _data Signed fully RLP-encoded transaction to get the L1 fee for.
/// @return L1 fee that should be paid for the tx
function _getL1FeeCurie(bytes memory _data) private view returns (uint256) {
// We have bounded the values of `commitScalar` and `blobScalar`, so the whole expression won't overflow.
return (commitScalar * l1BaseFee + blobScalar * _data.length * l1BlobBaseFee) / PRECISION;
}
}
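
A worked example of the post-Curie formula in `_getL1FeeCurie`, using 256-bit-style arithmetic as the contract does; all scalar and fee values below are illustrative, not production parameters:

package main

import (
	"fmt"
	"math/big"
)

var precision = big.NewInt(1_000_000_000) // PRECISION = 1e9

// getL1FeeCurie mirrors the Solidity expression:
// (commitScalar*l1BaseFee + blobScalar*len(data)*l1BlobBaseFee) / PRECISION
func getL1FeeCurie(commitScalar, blobScalar, l1BaseFee, l1BlobBaseFee *big.Int, dataLen int64) *big.Int {
	commit := new(big.Int).Mul(commitScalar, l1BaseFee)
	blob := new(big.Int).Mul(blobScalar, big.NewInt(dataLen))
	blob.Mul(blob, l1BlobBaseFee)
	return new(big.Int).Div(commit.Add(commit, blob), precision)
}

func main() {
	// Illustrative values only: base fee 30 gwei, blob base fee 1 gwei,
	// a 500-byte signed RLP transaction.
	fee := getL1FeeCurie(
		big.NewInt(1_000_000_000),  // commitScalar: 1.0 in 1e9 fixed point
		big.NewInt(500_000_000),    // blobScalar: 0.5 in 1e9 fixed point
		big.NewInt(30_000_000_000), // l1BaseFee
		big.NewInt(1_000_000_000),  // l1BlobBaseFee
		500,
	)
	// (1e9*3e10 + 5e8*500*1e9) / 1e9 = 3e10 + 2.5e11 = 2.8e11 wei
	fmt.Println(fee) // 280000000000
}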

View File

@@ -23,7 +23,7 @@ contract ScrollStandardERC20Factory is Ownable, IScrollStandardERC20Factory {
/// @inheritdoc IScrollStandardERC20Factory
function computeL2TokenAddress(address _gateway, address _l1Token) external view returns (address) {
// In StandardERC20Gateway, all corresponding l2 tokens are depoyed by Create2 with salt,
// In StandardERC20Gateway, all corresponding l2 tokens are deployed by Create2 with salt,
// we can calculate the l2 address directly.
bytes32 _salt = _getSalt(_gateway, _l1Token);

View File

@@ -369,7 +369,7 @@ library PatriciaMerkleTrieVerifier {
// first item is considered the root node.
// Otherwise verifies that the hash of the current node
// is the same as the previous choosen one.
// is the same as the previous chosen one.
switch i
case 1 {
rootHash := hash
@@ -425,7 +425,7 @@ library PatriciaMerkleTrieVerifier {
}
}
// lastly, derive the path of the choosen one (TM)
// lastly, derive the path of the chosen one (TM)
path := derivePath(key, depth)
}

View File

@@ -113,7 +113,7 @@ library ZkTrieVerifier {
// first item is considered the root node.
// Otherwise verifies that the hash of the current node
// is the same as the previous choosen one.
// is the same as the previous chosen one.
switch depth
case 1 {
rootHash := hash
@@ -262,7 +262,7 @@ library ZkTrieVerifier {
ptr, storageValue := verifyStorageProof(poseidon, storageKey, storageRootHash, ptr)
// the one and only boundary check
// in case an attacker crafted a malicous payload
// in case an attacker crafted a malicious payload
// and succeeds in the prior verification steps
// then this should catch any bogus accesses
if iszero(eq(ptr, add(proof.offset, proof.length))) {

View File

@@ -0,0 +1,119 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.24;
import {ScrollChain} from "../L1/rollup/ScrollChain.sol";
import {BatchHeaderV0Codec} from "../libraries/codec/BatchHeaderV0Codec.sol";
import {BatchHeaderV1Codec} from "../libraries/codec/BatchHeaderV1Codec.sol";
contract ScrollChainMockFinalize is ScrollChain {
/***************
* Constructor *
***************/
/// @notice Constructor for `ScrollChain` implementation contract.
///
/// @param _chainId The chain id of L2.
/// @param _messageQueue The address of `L1MessageQueue` contract.
/// @param _verifier The address of zkevm verifier contract.
constructor(
uint64 _chainId,
address _messageQueue,
address _verifier
) ScrollChain(_chainId, _messageQueue, _verifier) {}
/*****************************
* Public Mutating Functions *
*****************************/
function finalizeBatch(
bytes calldata _batchHeader,
bytes32 _prevStateRoot,
bytes32 _postStateRoot,
bytes32 _withdrawRoot
) external OnlyProver whenNotPaused {
require(_prevStateRoot != bytes32(0), "previous state root is zero");
require(_postStateRoot != bytes32(0), "new state root is zero");
// compute batch hash and verify
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
// verify previous state root.
require(finalizedStateRoots[_batchIndex - 1] == _prevStateRoot, "incorrect previous state root");
// avoid duplicated verification
require(finalizedStateRoots[_batchIndex] == bytes32(0), "batch already verified");
// check and update lastFinalizedBatchIndex
unchecked {
require(lastFinalizedBatchIndex + 1 == _batchIndex, "incorrect batch index");
lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
// Pop finalized and non-skipped message from L1MessageQueue.
_popL1Messages(
BatchHeaderV0Codec.getSkippedBitmapPtr(memPtr),
BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr),
BatchHeaderV0Codec.getL1MessagePopped(memPtr)
);
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
function finalizeBatch4844(
bytes calldata _batchHeader,
bytes32 _prevStateRoot,
bytes32 _postStateRoot,
bytes32 _withdrawRoot,
bytes calldata _blobDataProof
) external OnlyProver whenNotPaused {
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
// compute batch hash and verify
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(memPtr);
// Calls the point evaluation precompile and verifies the output
{
(bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(
abi.encodePacked(_blobVersionedHash, _blobDataProof)
);
// We verify that the point evaluation precompile call was successful by checking that the last 32 bytes of the
// response equal BLS_MODULUS, as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
if (!success) revert ErrorCallPointEvaluationPrecompileFailed();
(, uint256 result) = abi.decode(data, (uint256, uint256));
if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
}
// verify previous state root.
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
// avoid duplicated verification
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
// check and update lastFinalizedBatchIndex
unchecked {
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
lastFinalizedBatchIndex = _batchIndex;
}
// record state root and withdraw root
finalizedStateRoots[_batchIndex] = _postStateRoot;
withdrawRoots[_batchIndex] = _withdrawRoot;
// Pop finalized and non-skipped message from L1MessageQueue.
_popL1Messages(
BatchHeaderV1Codec.getSkippedBitmapPtr(memPtr),
BatchHeaderV1Codec.getTotalL1MessagePopped(memPtr),
BatchHeaderV1Codec.getL1MessagePopped(memPtr)
);
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
}
}
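
For reference, the output check in `finalizeBatch4844` relies on the EIP-4844 point evaluation precompile (address 0x0A) returning two 32-byte words on success, FIELD_ELEMENTS_PER_BLOB and BLS_MODULUS. A minimal Go sketch of the same validation applied to a raw 64-byte response:

package main

import (
	"errors"
	"math/big"
)

// BLS_MODULUS as defined in EIP-4844 and in ScrollChain.
var blsModulus, _ = new(big.Int).SetString(
	"52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)

// checkPointEvaluationOutput mirrors the contract's check: decode the
// second 32-byte word of the precompile return and require it to equal
// BLS_MODULUS.
func checkPointEvaluationOutput(ret []byte) error {
	if len(ret) != 64 {
		return errors.New("unexpected precompile return length")
	}
	result := new(big.Int).SetBytes(ret[32:])
	if result.Cmp(blsModulus) != 0 {
		return errors.New("ErrorUnexpectedPointEvaluationPrecompileOutput")
	}
	return nil
}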

View File

@@ -4,14 +4,15 @@ pragma solidity =0.8.24;
import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
import {L1BlockContainer} from "../L2/predeploys/L1BlockContainer.sol";
import {L1GasPriceOracle} from "../L2/predeploys/L1GasPriceOracle.sol";
import {Whitelist} from "../L2/predeploys/Whitelist.sol";
contract L1GasPriceOracleTest is DSTestPlus {
uint256 private constant PRECISION = 1e9;
uint256 private constant MAX_OVERHEAD = 30000000 / 16;
uint256 private constant MAX_SCALE = 1000 * PRECISION;
uint256 private constant MAX_SCALAR = 1000 * PRECISION;
uint256 private constant MAX_COMMIT_SCALAR = 10**9 * PRECISION;
uint256 private constant MAX_BLOB_SCALAR = 10**9 * PRECISION;
L1GasPriceOracle private oracle;
Whitelist private whitelist;
@@ -36,7 +37,7 @@ contract L1GasPriceOracleTest is DSTestPlus {
hevm.stopPrank();
// overhead is too large
hevm.expectRevert("exceed maximum overhead");
hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxOverhead.selector);
oracle.setOverhead(MAX_OVERHEAD + 1);
// call by owner, should succeed
@@ -46,7 +47,7 @@ contract L1GasPriceOracleTest is DSTestPlus {
}
function testSetScalar(uint256 _scalar) external {
_scalar = bound(_scalar, 0, MAX_SCALE);
_scalar = bound(_scalar, 0, MAX_SCALAR);
// call by non-owner, should revert
hevm.startPrank(address(1));
@@ -55,8 +56,8 @@ contract L1GasPriceOracleTest is DSTestPlus {
hevm.stopPrank();
// scale is too large
hevm.expectRevert("exceed maximum scale");
oracle.setScalar(MAX_SCALE + 1);
hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxScalar.selector);
oracle.setScalar(MAX_SCALAR + 1);
// call by owner, should succeed
assertEq(oracle.scalar(), 0);
@@ -64,6 +65,44 @@ contract L1GasPriceOracleTest is DSTestPlus {
assertEq(oracle.scalar(), _scalar);
}
function testSetCommitScalar(uint256 _scalar) external {
_scalar = bound(_scalar, 0, MAX_COMMIT_SCALAR);
// call by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert("caller is not the owner");
oracle.setCommitScalar(_scalar);
hevm.stopPrank();
// scale is too large
hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxCommitScalar.selector);
oracle.setCommitScalar(MAX_COMMIT_SCALAR + 1);
// call by owner, should succeed
assertEq(oracle.commitScalar(), 0);
oracle.setCommitScalar(_scalar);
assertEq(oracle.commitScalar(), _scalar);
}
function testSetBlobScalar(uint256 _scalar) external {
_scalar = bound(_scalar, 0, MAX_BLOB_SCALAR);
// call by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert("caller is not the owner");
oracle.setBlobScalar(_scalar);
hevm.stopPrank();
// scale is too large
hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxBlobScalar.selector);
oracle.setBlobScalar(MAX_BLOB_SCALAR + 1);
// call by owner, should succeed
assertEq(oracle.blobScalar(), 0);
oracle.setBlobScalar(_scalar);
assertEq(oracle.blobScalar(), _scalar);
}
function testUpdateWhitelist(address _newWhitelist) external {
hevm.assume(_newWhitelist != address(whitelist));
@@ -79,12 +118,29 @@ contract L1GasPriceOracleTest is DSTestPlus {
assertEq(address(oracle.whitelist()), _newWhitelist);
}
function testEnableCurie() external {
// call by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert("caller is not the owner");
oracle.enableCurie();
hevm.stopPrank();
// call by owner, should succeed
assertBoolEq(oracle.isCurie(), false);
oracle.enableCurie();
assertBoolEq(oracle.isCurie(), true);
// enable twice, should revert
hevm.expectRevert(L1GasPriceOracle.ErrAlreadyInCurieFork.selector);
oracle.enableCurie();
}
function testSetL1BaseFee(uint256 _baseFee) external {
_baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
// call by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert("Not whitelisted sender");
hevm.expectRevert(L1GasPriceOracle.ErrCallerNotWhitelisted.selector);
oracle.setL1BaseFee(_baseFee);
hevm.stopPrank();
@@ -94,7 +150,25 @@ contract L1GasPriceOracleTest is DSTestPlus {
assertEq(oracle.l1BaseFee(), _baseFee);
}
function testGetL1GasUsed(uint256 _overhead, bytes memory _data) external {
function testSetL1BaseFeeAndBlobBaseFee(uint256 _baseFee, uint256 _blobBaseFee) external {
_baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
_blobBaseFee = bound(_blobBaseFee, 0, 1e9 * 20000); // max 20k gwei
// call by non-owner, should revert
hevm.startPrank(address(1));
hevm.expectRevert(L1GasPriceOracle.ErrCallerNotWhitelisted.selector);
oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);
hevm.stopPrank();
// call by owner, should succeed
assertEq(oracle.l1BaseFee(), 0);
assertEq(oracle.l1BlobBaseFee(), 0);
oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);
assertEq(oracle.l1BaseFee(), _baseFee);
assertEq(oracle.l1BlobBaseFee(), _blobBaseFee);
}
function testGetL1GasUsedBeforeCurie(uint256 _overhead, bytes memory _data) external {
_overhead = bound(_overhead, 0, MAX_OVERHEAD);
oracle.setOverhead(_overhead);
@@ -108,14 +182,14 @@ contract L1GasPriceOracleTest is DSTestPlus {
assertEq(oracle.getL1GasUsed(_data), _gasUsed);
}
function testGetL1Fee(
function testGetL1FeeBeforeCurie(
uint256 _baseFee,
uint256 _overhead,
uint256 _scalar,
bytes memory _data
) external {
_overhead = bound(_overhead, 0, MAX_OVERHEAD);
_scalar = bound(_scalar, 0, MAX_SCALE);
_scalar = bound(_scalar, 0, MAX_SCALAR);
_baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
oracle.setOverhead(_overhead);
@@ -130,4 +204,32 @@ contract L1GasPriceOracleTest is DSTestPlus {
assertEq(oracle.getL1Fee(_data), (_gasUsed * _baseFee * _scalar) / PRECISION);
}
function testGetL1GasUsedCurie(bytes memory _data) external {
oracle.enableCurie();
assertEq(oracle.getL1GasUsed(_data), 0);
}
function testGetL1FeeCurie(
uint256 _baseFee,
uint256 _blobBaseFee,
uint256 _commitScalar,
uint256 _blobScalar,
bytes memory _data
) external {
_baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
_blobBaseFee = bound(_blobBaseFee, 0, 1e9 * 20000); // max 20k gwei
_commitScalar = bound(_commitScalar, 0, MAX_COMMIT_SCALAR);
_blobScalar = bound(_blobScalar, 0, MAX_BLOB_SCALAR);
oracle.enableCurie();
oracle.setCommitScalar(_commitScalar);
oracle.setBlobScalar(_blobScalar);
oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);
assertEq(
oracle.getL1Fee(_data),
(_commitScalar * _baseFee + _blobScalar * _blobBaseFee * _data.length) / PRECISION
);
}
}

View File

@@ -18,19 +18,17 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
MockZkEvmVerifier private v0;
MockZkEvmVerifier private v1;
MockZkEvmVerifier private v2;
MockScrollChain private chain;
function setUp() external {
v0 = new MockZkEvmVerifier();
v1 = new MockZkEvmVerifier();
v2 = new MockZkEvmVerifier();
chain = new MockScrollChain(address(1), address(1));
uint256[] memory _versions = new uint256[](1);
address[] memory _verifiers = new address[](1);
_versions[0] = 0;
_verifiers[0] = address(v0);
verifier = new MultipleVersionRollupVerifier(address(chain), _versions, _verifiers);
verifier = new MultipleVersionRollupVerifier(_versions, _verifiers);
}
function testUpdateVerifierVersion0(address _newVerifier) external {
@@ -42,10 +40,6 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
verifier.updateVerifier(0, 0, address(0));
hevm.stopPrank();
// start batch index finalized, revert
hevm.expectRevert(MultipleVersionRollupVerifier.ErrorStartBatchIndexFinalized.selector);
verifier.updateVerifier(0, 0, address(1));
// zero verifier address, revert
hevm.expectRevert(MultipleVersionRollupVerifier.ErrorZeroAddress.selector);
verifier.updateVerifier(0, 1, address(0));
@@ -93,10 +87,6 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
verifier.updateVerifier(version, 0, address(0));
hevm.stopPrank();
// start batch index finalized, revert
hevm.expectRevert(MultipleVersionRollupVerifier.ErrorStartBatchIndexFinalized.selector);
verifier.updateVerifier(version, 0, address(1));
// zero verifier address, revert
hevm.expectRevert(MultipleVersionRollupVerifier.ErrorZeroAddress.selector);
verifier.updateVerifier(version, 1, address(0));

View File

@@ -89,12 +89,6 @@ contract ScrollChainTest is DSTestPlus {
rollup.commitBatch(0, batchHeader0, new bytes[](0), new bytes(0));
hevm.stopPrank();
// invalid version, revert
hevm.startPrank(address(0));
hevm.expectRevert(ScrollChain.ErrorInvalidBatchHeaderVersion.selector);
rollup.commitBatch(2, batchHeader0, new bytes[](1), new bytes(0));
hevm.stopPrank();
// batch header length too small, revert
hevm.startPrank(address(0));
hevm.expectRevert(BatchHeaderV0Codec.ErrorBatchHeaderLengthTooSmall.selector);

View File

@@ -19,7 +19,6 @@ test:
libzkp:
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
find ../common | grep libzktrie.so | xargs -I{} cp {} ./internal/logic/verifier/lib
coordinator_api: libzkp ## Builds the Coordinator api instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api

View File

@@ -42,6 +42,7 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
vkMap: vkMap,
reverseVkMap: reverseMap(vkMap),
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -64,48 +65,31 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
return bp
}
// Assign loads and assigns batch tasks
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
type chunkIndexRange struct {
start uint64
end uint64
}
hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
return nil, err
func (r *chunkIndexRange) merge(o chunkIndexRange) *chunkIndexRange {
var start, end = r.start, r.end
if o.start < r.start {
start = o.start
}
if o.end > r.end {
end = o.end
}
return &chunkIndexRange{start, end}
}
// if the hard fork number is set, the rollup relayer must generate chunks starting from the hard fork number,
// so the first chunk of the hard fork must have start_block_number equal to ForkBlockNumber
var startChunkIndex uint64 = 0
var endChunkIndex uint64 = math.MaxInt64
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
return nil, nil
}
startChunkIndex = startChunk.Index
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
// toChunk being nil only indicates that we haven't yet reached the fork boundary,
// so there is no need to change endChunkIndex from math.MaxInt64
endChunkIndex = toChunk.Index
}
}
func (r *chunkIndexRange) contains(start, end uint64) bool {
return r.start <= start && r.end > end
}
type getHardForkNameByBatchFunc func(*orm.Batch) (string, error)
func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCtx *proverTaskContext,
chunkRange *chunkIndexRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByBatchFunc) (*coordinatorType.GetTaskSchema, error) {
startChunkIndex, endChunkIndex := chunkRange.start, chunkRange.end
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
var batchTask *orm.Batch
@@ -153,14 +137,26 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
log.Info("start batch proof generation session", "id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
var (
proverVersion = taskCtx.ProverVersion
hardForkName = taskCtx.HardForkName
)
var err error
if getHardForkName != nil {
hardForkName, err = getHardForkName(batchTask)
if err != nil {
log.Error("failed to get hard fork name by batch", "task_id", batchTask.Hash, "error", err.Error())
return nil, ErrCoordinatorInternalFailure
}
}
proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProverVersion: proverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/databased/db.go
@@ -170,18 +166,18 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
bp.batchTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.batchTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
@@ -191,6 +187,114 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil
}
func (bp *BatchProverTask) getChunkRangeByName(ctx *gin.Context, hardForkName string) (*chunkIndexRange, error) {
hardForkNumber, err := bp.getHardForkNumberByName(hardForkName)
if err != nil {
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", hardForkName)
return nil, err
}
// if the hard fork number is set, the rollup relayer must generate chunks starting from the hard fork number,
// so the first chunk of the hard fork must have start_block_number equal to ForkBlockNumber
var startChunkIndex uint64 = 0
var endChunkIndex uint64 = math.MaxInt64
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", hardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
return nil, nil
}
startChunkIndex = startChunk.Index
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", hardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
// toChunk being nil only indicates that we haven't yet reached the fork boundary,
// so there is no need to change endChunkIndex from math.MaxInt64
endChunkIndex = toChunk.Index
}
}
return &chunkIndexRange{startChunkIndex, endChunkIndex}, nil
}
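The returned range is half-open over chunk indices. As a hypothetical illustration (block numbers invented), assuming curie activates at block 1,000,000 and is the latest fork:
r, _ := bp.getChunkRangeByName(ctx, "curie")
// r.start: index of the chunk whose start_block_number == 1_000_000
// r.end:   math.MaxInt64, since no post-curie fork chunk exists yet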
func (bp *BatchProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
chunkRange, err := bp.getChunkRangeByName(ctx, taskCtx.HardForkName)
if err != nil {
return nil, err
}
if chunkRange == nil {
return nil, nil
}
return bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, nil)
}
func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
var (
hardForkNames [2]string
chunkRanges [2]*chunkIndexRange
err error
)
var chunkRange *chunkIndexRange
for i := 0; i < 2; i++ {
hardForkNames[i] = bp.reverseVkMap[getTaskParameter.VKs[i]]
chunkRanges[i], err = bp.getChunkRangeByName(ctx, hardForkNames[i])
if err != nil {
return nil, err
}
if chunkRanges[i] != nil {
if chunkRange == nil {
chunkRange = chunkRanges[i]
} else {
chunkRange = chunkRange.merge(*chunkRanges[i])
}
}
}
if chunkRange == nil {
return nil, nil
}
var hardForkName string
getHardForkName := func(batch *orm.Batch) (string, error) {
for i := 0; i < 2; i++ {
if chunkRanges[i] != nil && chunkRanges[i].contains(batch.StartChunkIndex, batch.EndChunkIndex) {
hardForkName = hardForkNames[i]
break
}
}
if hardForkName == "" {
log.Warn("get batch not belongs to any hard fork name", "batch id", batch.Index)
return "", fmt.Errorf("get batch not belongs to any hard fork name, batch id: %d", batch.Index)
}
return hardForkName, nil
}
schema, err := bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, getHardForkName)
if schema != nil && err == nil {
schema.HardForkName = hardForkName
return schema, nil
}
return schema, err
}
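A hypothetical walk-through of the two-circuit batch path, with invented chunk indices: suppose the bernoulli VK maps to chunks [0, 500) and the curie VK to [500, math.MaxInt64). Tasks are drawn from the merged range, and getHardForkName labels the picked batch by whichever sub-range contains its chunk span (this uses the merge sketch above):
bernoulli := chunkIndexRange{start: 0, end: 500}
curie := chunkIndexRange{start: 500, end: math.MaxInt64}
merged := bernoulli.merge(curie)          // covers [0, math.MaxInt64)
_ = merged                                // tasks are picked from the merged range
fmt.Println(bernoulli.contains(480, 499)) // true  -> batch labeled "bernoulli"
fmt.Println(curie.contains(480, 499))     // false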
// Assign loads and assigns batch tasks
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
if len(getTaskParameter.VKs) > 0 {
return bp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
}
return bp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
}
func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// get chunk from db
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID)

View File

@@ -39,6 +39,7 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
vkMap: vkMap,
reverseVkMap: reverseMap(vkMap),
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -61,20 +62,11 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
return cp
}
// Assign assigns a chunk proving task that needs to be proven
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
type getHardForkNameByChunkFunc func(*orm.Chunk) (string, error)
hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
return nil, err
}
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCtx *proverTaskContext,
blockRange *blockRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByChunkFunc) (*coordinatorType.GetTaskSchema, error) {
fromBlockNum, toBlockNum := blockRange.from, blockRange.to
if toBlockNum > getTaskParameter.ProverHeight {
toBlockNum = getTaskParameter.ProverHeight + 1
}
@@ -126,14 +118,26 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
log.Info("start chunk generation session", "id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
var (
proverVersion = taskCtx.ProverVersion
hardForkName = taskCtx.HardForkName
err error
)
if getHardForkName != nil {
hardForkName, err = getHardForkName(chunkTask)
if err != nil {
log.Error("failed to get hard fork name by chunk", "task_id", chunkTask.Hash, "error", err.Error())
return nil, ErrCoordinatorInternalFailure
}
}
proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProverVersion: proverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// for why UTC time is used here, see scroll/common/database/db.go
@@ -142,18 +146,18 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
cp.chunkTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
@@ -163,6 +167,95 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil
}
func (cp *ChunkProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
blockRange, err := cp.getBlockRangeByName(taskCtx.HardForkName)
if err != nil {
return nil, err
}
return cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, nil)
}
func (cp *ChunkProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
var (
hardForkNames [2]string
blockRanges [2]*blockRange
err error
)
for i := 0; i < 2; i++ {
hardForkNames[i] = cp.reverseVkMap[getTaskParameter.VKs[i]]
blockRanges[i], err = cp.getBlockRangeByName(hardForkNames[i])
if err != nil {
return nil, err
}
}
blockRange, err := blockRanges[0].merge(*blockRanges[1])
if err != nil {
return nil, err
}
var hardForkName string
getHardForkName := func(chunk *orm.Chunk) (string, error) {
for i := 0; i < 2; i++ {
if blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) {
hardForkName = hardForkNames[i]
break
}
}
if hardForkName == "" {
log.Warn("get chunk not belongs to any hard fork name", "chunk id", chunk.Index)
return "", fmt.Errorf("get chunk not belongs to any hard fork name, chunk id: %d", chunk.Index)
}
return hardForkName, nil
}
schema, err := cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, getHardForkName)
if schema != nil && err == nil {
schema.HardForkName = hardForkName
return schema, nil
}
return schema, err
}
type blockRange struct {
from uint64
to uint64
}
func (r *blockRange) merge(o blockRange) (*blockRange, error) {
if r.from == o.to {
return &blockRange{o.from, r.to}, nil
} else if r.to == o.from {
return &blockRange{r.from, o.to}, nil
}
return nil, fmt.Errorf("two ranges are not adjacent")
}
func (r *blockRange) contains(start, end uint64) bool {
return r.from <= start && r.to > end
}
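A usage sketch with invented block heights: forks.BlockRange reports the latest fork's upper bound as math.MaxInt64, so two consecutive forks always yield adjacent ranges that merge cleanly, while non-consecutive ranges are rejected:
a := blockRange{from: 0, to: 1_000_000}             // e.g. bernoulli blocks [0, 1_000_000)
b := blockRange{from: 1_000_000, to: math.MaxInt64} // e.g. curie blocks
m, err := a.merge(b)                                // -> &blockRange{0, math.MaxInt64}, nil
_, _ = m, err
_, gapErr := a.merge(blockRange{from: 2_000_000, to: 3_000_000}) // "two ranges are not adjacent"
_ = gapErr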
func (cp *ChunkProverTask) getBlockRangeByName(hardForkName string) (*blockRange, error) {
hardForkNumber, err := cp.getHardForkNumberByName(hardForkName)
if err != nil {
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", hardForkName)
return nil, err
}
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
return &blockRange{fromBlockNum, toBlockNum}, nil
}
// Assign assigns a chunk proving task that needs to be proven
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
if len(getTaskParameter.VKs) > 0 {
return cp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
}
return cp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
}
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)

View File

@@ -29,14 +29,27 @@ type ProverTask interface {
Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
}
func reverseMap(input map[string]string) map[string]string {
output := make(map[string]string, len(input))
for k, v := range input {
if k != "" {
output[v] = k
}
}
return output
}
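A usage sketch (vk strings invented): vkMap is keyed by hard fork name, so the reversed map resolves a prover-supplied vk back to its fork; the k != "" guard presumably keeps the legacy empty-name alias out of the reverse lookup:
vkMap := map[string]string{"bernoulli": "vk-b64-1", "curie": "vk-b64-2"}
rev := reverseMap(vkMap)
fmt.Println(rev["vk-b64-2"]) // "curie"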
// BaseProverTask a base prover task which contain series functions
type BaseProverTask struct {
cfg *config.Config
db *gorm.DB
vkMap map[string]string
nameForkMap map[string]uint64
forkHeights []uint64
// key is hardForkName, value is vk
vkMap map[string]string
// key is vk, value is hardForkName
reverseVkMap map[string]string
nameForkMap map[string]uint64
forkHeights []uint64
batchOrm *orm.Batch
chunkOrm *orm.Chunk
@@ -74,30 +87,42 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
}
ptc.ProverVersion = proverVersion.(string)
hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, fmt.Errorf("get hard fork name from context failed")
}
ptc.HardForkName = hardForkName.(string)
if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
}
vk, vkExist := b.vkMap[ptc.HardForkName]
if !vkExist {
return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
}
// if the prover has a different vk
if getTaskParameter.VK != vk {
log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
// if the prover reports a different prover version
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
// signals that the prover is a multi-circuit version
if len(getTaskParameter.VKs) > 0 {
if len(getTaskParameter.VKs) != 2 {
return nil, fmt.Errorf("parameter vks length must be 2")
}
for _, vk := range getTaskParameter.VKs {
if _, exists := b.reverseVkMap[vk]; !exists {
return nil, fmt.Errorf("incompatible vk. vk %s is invalid", vk)
}
}
} else {
hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, fmt.Errorf("get hard fork name from context failed")
}
ptc.HardForkName = hardForkName.(string)
vk, vkExist := b.vkMap[ptc.HardForkName]
if !vkExist {
return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
}
// if the prover has a different vk
if getTaskParameter.VK != vk {
log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
// if the prover reports a different prover version
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
// if the prover reports a same prover version
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
// if the prover reports a same prover version
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
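In effect checkParameter now accepts two request shapes; a hypothetical sketch with placeholder values (the vks path must carry exactly two entries, each present in reverseVkMap):
legacy := coordinatorType.GetTaskParameter{TaskType: 2, VK: "vk-b64-curie"} // single-circuit go prover
multi := coordinatorType.GetTaskParameter{
	TaskType: 2,
	VKs:      []string{"vk-b64-bernoulli", "vk-b64-curie"}, // multi-circuit rust prover
}
_, _ = legacy, multi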


@@ -134,7 +134,12 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if len(pv) == 0 {
return fmt.Errorf("get ProverVersion from context failed")
}
hardForkName := ctx.GetString(coordinatorType.HardForkName)
// use hard_fork_name from the parameter first;
// if the prover supports multiple hard forks, the real hard_fork_name is not set in the gin context
hardForkName := proofParameter.HardForkName
if hardForkName == "" {
hardForkName = ctx.GetString(coordinatorType.HardForkName)
}
var proverTask *orm.ProverTask
var err error


@@ -3,8 +3,8 @@
package verifier
/*
#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
#cgo LDFLAGS: -lzkp -lm -ldl -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/
@@ -164,9 +164,7 @@ func (v *Verifier) loadEmbedVK() error {
return err
}
v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
v.BatchVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
return nil
}


@@ -49,7 +49,7 @@ func TestFFI(t *testing.T) {
t.Log("Verified chunk proof 2")
batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof, "bernoulli")
batchOk, err := v.VerifyBatchProof(batchProof, "curie")
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")


@@ -309,7 +309,7 @@ func (o *Batch) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA
db = db.Model(&Batch{})
db = db.Where("hash", hash)
db = db.Where("total_attempts >= ?", maxAttempts)
db = db.Where("proving_status != ?", int(types.ProverProofValid))
db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
if err := db.Update("proving_status", int(types.ProvingTaskFailed)).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, types.ProvingTaskFailed.String())
}


@@ -332,7 +332,7 @@ func (o *Chunk) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
db = db.Where("total_attempts >= ?", maxAttempts)
db = db.Where("proving_status != ?", int(types.ProverProofValid))
db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
if err := db.Update("proving_status", int(types.ProvingTaskFailed)).Error; err != nil {
return fmt.Errorf("Batch.UpdateProvingStatus error: %w, batch hash: %v, status: %v", err, hash, types.ProvingTaskFailed.String())
}


@@ -2,15 +2,17 @@ package types
// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
VK string `form:"vk" json:"vk"`
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
VK string `form:"vk" json:"vk"` // will be deprecated once all go_provers are offline
VKs []string `form:"vks" json:"vks"` // for rust_provers that support multi-circuits
}
// GetTaskSchema the schema data return to prover for get prover task
type GetTaskSchema struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
TaskData string `json:"task_data"`
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
TaskData string `json:"task_data"`
HardForkName string `json:"hard_fork_name"`
}
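A hypothetical response for a multi-circuit prover (all values are placeholders); the new hard_fork_name field tells the prover which circuit to load:
schema := GetTaskSchema{
	UUID:         "example-uuid",
	TaskID:       "example-batch-hash",
	TaskType:     2,       // batch
	TaskData:     "{...}", // serialized proving payload
	HardForkName: "curie", // prover selects the curie circuit
}
_ = schema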


@@ -3,11 +3,12 @@ package types
// SubmitProofParameter the SubmitProof api request parameter
type SubmitProofParameter struct {
// TODO: once all provers have upgraded, change this field to required
UUID string `form:"uuid" json:"uuid"`
TaskID string `form:"task_id" json:"task_id" binding:"required"`
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
Proof string `form:"proof" json:"proof"`
FailureType int `form:"failure_type" json:"failure_type"`
FailureMsg string `form:"failure_msg" json:"failure_msg"`
UUID string `form:"uuid" json:"uuid"`
TaskID string `form:"task_id" json:"task_id" binding:"required"`
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
Proof string `form:"proof" json:"proof"`
FailureType int `form:"failure_type" json:"failure_type"`
FailureMsg string `form:"failure_msg" json:"failure_msg"`
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
}
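And the matching hypothetical submission (placeholder values): a multi-circuit prover echoes hard_fork_name back so HandleZkProof can pick the right verifier without relying on the login-time context:
param := SubmitProofParameter{
	UUID:         "example-uuid",
	TaskID:       "example-batch-hash",
	TaskType:     2,
	Status:       0, // status code as defined by the prover protocol
	Proof:        "{...}",
	HardForkName: "curie",
}
_ = param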


@@ -1,74 +0,0 @@
[
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "uint256",
"name": "basefee",
"type": "uint256"
}
],
"name": "BaseFeeSuccess",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "bytes32",
"name": "data",
"type": "bytes32"
}
],
"name": "McopySuccess",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "TloadSuccess",
"type": "event"
},
{
"anonymous": false,
"inputs": [],
"name": "TstoreSuccess",
"type": "event"
},
{
"inputs": [],
"name": "useBaseFee",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "useMcopy",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "newValue",
"type": "uint256"
}
],
"name": "useTloadTstore",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]


@@ -1,221 +0,0 @@
package main
import (
"context"
"crypto/ecdsa"
"fmt"
"math/big"
"os"
"strings"
"time"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
)
func main() {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
log.Root().SetHandler(glogger)
privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(os.Getenv("L2_DEPLOYER_PRIVATE_KEY"), "0x"))
if err != nil {
log.Crit("failed to create private key", "err", err)
}
publicKey := privateKey.Public()
publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
if !ok {
log.Crit("failed to cast public key to ECDSA")
}
fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)
client, err := ethclient.Dial(os.Getenv("SCROLL_L2_DEPLOYMENT_RPC"))
if err != nil {
log.Crit("failed to connect to network", "err", err)
}
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, new(big.Int).SetUint64(222222))
if err != nil {
log.Crit("failed to initialize keyed transactor with chain ID", "err", err)
}
abiJSON, err := os.ReadFile("abi.json")
if err != nil {
log.Crit("failed to read ABI file", "err", err)
}
l2TestCurieOpcodesMetaData := &bind.MetaData{ABI: string(abiJSON)}
l2TestCurieOpcodesAbi, err := l2TestCurieOpcodesMetaData.GetAbi()
if err != nil {
log.Crit("failed to get abi", "err", err)
}
nonce, err := client.PendingNonceAt(context.Background(), fromAddress)
if err != nil {
log.Crit("failed to get pending nonce", "err", err)
}
useTloadTstoreCalldata, err := l2TestCurieOpcodesAbi.Pack("useTloadTstore", new(big.Int).SetUint64(9876543210))
if err != nil {
log.Crit("failed to pack useTloadTstore calldata", "err", err)
}
useMcopyCalldata, err := l2TestCurieOpcodesAbi.Pack("useMcopy")
if err != nil {
log.Crit("failed to pack useMcopy calldata", "err", err)
}
useBaseFee, err := l2TestCurieOpcodesAbi.Pack("useBaseFee")
if err != nil {
log.Crit("failed to pack useBaseFee calldata", "err", err)
}
l2TestCurieOpcodesAddr := common.HexToAddress(os.Getenv("L2_TEST_CURIE_OPCODES_ADDR"))
txTypes := []int{
LegacyTxType,
AccessListTxType,
DynamicFeeTxType,
}
accessLists := []types.AccessList{
nil,
{
{Address: common.HexToAddress("0x0000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
}},
},
{
{Address: common.HexToAddress("0x1000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001")}},
},
{
{Address: common.HexToAddress("0x2000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"),
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"),
}},
{Address: common.HexToAddress("0x3000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"),
}},
},
{
{Address: common.HexToAddress("0x4000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"),
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // repetitive storage key
}},
},
}
for i := 0; i < 50; i++ {
for _, txType := range txTypes {
for _, accessList := range accessLists {
if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useTloadTstoreCalldata); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1
if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useMcopyCalldata); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1
if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useBaseFee); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1
if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, nil, []byte{0x01, 0x02, 0x03, 0x04}); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1
if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, new(big.Int).SetUint64(1), []byte{0x01, 0x02, 0x03, 0x04}); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1
if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, new(big.Int).SetUint64(1), nil); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1
}
}
}
}
const (
LegacyTxType = 1
AccessListTxType = 2
DynamicFeeTxType = 3
)
func sendTransaction(client *ethclient.Client, auth *bind.TransactOpts, txType int, to *common.Address, nonce uint64, accessList types.AccessList, value *big.Int, data []byte) error {
var txData types.TxData
switch txType {
case LegacyTxType:
txData = &types.LegacyTx{
Nonce: nonce,
GasPrice: new(big.Int).SetUint64(1000000000),
Gas: 300000,
To: to,
Value: value,
Data: data,
}
case AccessListTxType:
txData = &types.AccessListTx{
ChainID: new(big.Int).SetUint64(222222),
Nonce: nonce,
GasPrice: new(big.Int).SetUint64(1000000000),
Gas: 300000,
To: to,
Value: value,
Data: data,
AccessList: accessList,
}
case DynamicFeeTxType:
txData = &types.DynamicFeeTx{
ChainID: new(big.Int).SetUint64(222222),
Nonce: nonce,
GasTipCap: new(big.Int).SetUint64(1000000000),
GasFeeCap: new(big.Int).SetUint64(1000000000),
Gas: 300000,
To: to,
Value: value,
Data: data,
AccessList: accessList,
}
default:
return fmt.Errorf("invalid transaction type: %d", txType)
}
signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
if err != nil {
return fmt.Errorf("failed to sign tx: %w", err)
}
if err = client.SendTransaction(context.Background(), signedTx); err != nil {
return fmt.Errorf("failed to send tx: %w", err)
}
log.Info("transaction sent", "txHash", signedTx.Hash().Hex())
var receipt *types.Receipt
for {
receipt, err = client.TransactionReceipt(context.Background(), signedTx.Hash())
if err == nil {
if receipt.Status != types.ReceiptStatusSuccessful {
return fmt.Errorf("transaction failed: %s", signedTx.Hash().Hex())
}
break
}
log.Warn("waiting for receipt", "txHash", signedTx.Hash())
time.Sleep(2 * time.Second)
}
log.Info("Sent transaction", "txHash", signedTx.Hash().Hex(), "from", auth.From.Hex(), "nonce", signedTx.Nonce(), "to", to.Hex())
return nil
}


@@ -5,7 +5,6 @@ use (
./common
./coordinator
./database
./prover
./rollup
./tests/integration-test
)


@@ -137,7 +137,6 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
@@ -262,7 +261,6 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -321,7 +319,6 @@ github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1C
github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
@@ -381,8 +378,6 @@ github.com/sclevine/agouti v3.0.0+incompatible h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGu
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce h1:SN43TBs7VaJt9q737eWWqGz0OCg4v+PtUn3RbJcG1o0=
github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703 h1:dcvPPyyfe3SocOBwgww3e1wcWjgF85kKDsohY4TXII0=
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
@@ -454,6 +449,7 @@ github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgr
github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4=
@@ -518,7 +514,6 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=

prover/.gitignore (deleted)

@@ -1,14 +0,0 @@
.idea
stack/test_stack
mock/stack
build/bin/
# ignore db file
bbolt_db
core/lib
params/
seed
keystore

prover/Cargo.lock (generated; diff suppressed because it is too large)

prover/Cargo.toml (new file)

@@ -0,0 +1,48 @@
[package]
name = "prover"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[patch.crates-io]
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
[dependencies]
anyhow = "1.0"
log = "0.4"
env_logger = "0.11.3"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", branch = "v0.10", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
prover_next = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.4", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
once_cell = "1.19.0"
hex = "0.4.3"
tiny-keccak = { version = "2.0.0", features = ["sha3", "keccak"] }
rand = "0.8.5"
eth-keystore = "0.5.0"
rlp = "0.5.2"
tokio = "1.37.0"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
ctor = "0.2.8"


@@ -1,49 +1,52 @@
.PHONY: lint docker clean prover mock-prover
.PHONY: prover lint tests_binary
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
ZKEVM_VERSION=$(shell ./print_high_zkevm_version.sh)
ifeq (${ZKEVM_VERSION},)
$(error ZKEVM_VERSION not set)
else
$(info ZKEVM_VERSION is ${ZKEVM_VERSION})
endif
ZKEVM_COMMIT=$(shell echo ${ZKEVM_VERSION} | cut -d " " -f2)
$(info ZKEVM_COMMIT is ${ZKEVM_COMMIT})
HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')
ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
GIT_REV=$(shell git rev-parse --short HEAD)
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)
ifeq (${GO_TAG},)
$(error GO_TAG not set)
else
# use halo2_gpu
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_GPU_VERSION}
$(info GO_TAG is ${GO_TAG})
endif
libzkp:
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
rm -rf ./core/lib && cp -r ../common/libzkp/interface ./core/lib
find ../common | grep libzktrie.so | xargs -I{} cp {} ./core/lib/
ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_GPU_VERSION}
endif
prover: libzkp ## Build the Prover instance.
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/prover ./cmd
prover:
GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release
rm -rf ./lib && mkdir ./lib
find target/ -name "libzktrie.so" | xargs -I{} cp {} ./lib
mock-prover: ## Build the mocked Prover instance.
GOBIN=$(PWD)/build/bin go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/prover ./cmd
tests_binary:
cargo clean && cargo test --release --no-run
ls target/release/deps/prover* | grep -v "\.d" | xargs -I{} ln -sf {} ./prover.test
rm -rf ./lib && mkdir ./lib
find target/ -name "libzktrie.so" | xargs -I{} cp {} ./lib
gpu-prover: libzkp ## Build the GPU Prover instance.
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags gpu -o $(PWD)/build/bin/prover ./cmd
test-prover: libzkp
go test -tags ffi -timeout 0 -v ./prover
test-gpu-prover: libzkp
go test -tags="gpu ffi" -timeout 0 -v ./prover
lastest-zk-version:
curl -sL https://api.github.com/repos/scroll-tech/zkevm-circuits/commits | jq -r ".[0].sha"
lint: ## Lint the files - used for CI
cp -r ../common/libzkp/interface ./core/lib
GOBIN=$(PWD)/build/bin go run ../build/lint.go
clean: ## Empty out the bin folder
@rm -rf build/bin
lint:
cargo check --all-features
cargo clippy --all-features --all-targets -- -D warnings
cargo fmt --all


@@ -1,58 +0,0 @@
# Prover
This directory contains the Scroll Prover module.
## Build
```bash
make clean
make prover
```
The built prover binary is in the build/bin directory.
## Test
Make sure to lint before testing (or committing):
```bash
make lint
```
For current unit tests, run:
```bash
make prover
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib
export CHAIN_ID=534353 # for Scroll Alpha
go test -v ./...
```
When you need to mock prover results and run other prover tests (using [`core/mock.go`](core/mock.go) instead of [`core/prover.go`](core/prover.go)), run:
```bash
go test -tags="mock_prover" -v -race -covermode=atomic scroll-tech/prover/...
```
## Configure
The prover behavior can be configured using [`config.json`](config.json). Check the code comments of `Config` and `ProverCoreConfig` in [`config/config.go`](config/config.go) for more details.
## Start
1. Set environment variables:
```bash
export CHAIN_ID=534353 # change to correct chain ID
export RUST_MIN_STACK=100000000
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./prover/lib
```
2. Start the module using settings from config.json:
```bash
./build/bin/prover
```

View File

@@ -1,209 +0,0 @@
package client
import (
"context"
"crypto/ecdsa"
"fmt"
"sync"
"time"
"github.com/go-resty/resty/v2"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/prover/config"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
)
// CoordinatorClient is a client used for interacting with the Coordinator service.
type CoordinatorClient struct {
client *resty.Client
proverName string
hardForkName string
priv *ecdsa.PrivateKey
mu sync.Mutex
}
// NewCoordinatorClient constructs a new CoordinatorClient.
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, hardForkName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
client := resty.New().
SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second).
SetRetryCount(cfg.RetryCount).
SetRetryWaitTime(time.Duration(cfg.RetryWaitTimeSec) * time.Second).
SetBaseURL(cfg.BaseURL).
AddRetryAfterErrorCondition().
AddRetryCondition(func(response *resty.Response, err error) bool {
if err != nil {
log.Warn("Encountered an error while sending the request. Retrying...", "error", err)
return true
}
return response.IsError()
})
log.Info("successfully initialized prover client",
"base url", cfg.BaseURL,
"connection timeout (second)", cfg.ConnectionTimeoutSec,
"retry count", cfg.RetryCount,
"retry wait time (second)", cfg.RetryWaitTimeSec)
return &CoordinatorClient{
client: client,
proverName: proverName,
hardForkName: hardForkName,
priv: priv,
}, nil
}
// Login completes the entire login process in one function call.
func (c *CoordinatorClient) Login(ctx context.Context) error {
c.mu.Lock()
defer c.mu.Unlock()
var challengeResult ChallengeResponse
// Get random string
challengeResp, err := c.client.R().
SetHeader("Content-Type", "application/json").
SetResult(&challengeResult).
Get("/coordinator/v1/challenge")
if err != nil {
return fmt.Errorf("get random string failed: %w", err)
}
if challengeResp.StatusCode() != 200 {
return fmt.Errorf("failed to get random string, status code: %v", challengeResp.StatusCode())
}
// Prepare and sign the login request
authMsg := &message.AuthMsg{
Identity: &message.Identity{
ProverVersion: version.Version,
ProverName: c.proverName,
Challenge: challengeResult.Data.Token,
HardForkName: c.hardForkName,
},
}
err = authMsg.SignWithKey(c.priv)
if err != nil {
return fmt.Errorf("signature failed: %w", err)
}
// Login to coordinator
loginReq := &LoginRequest{
Message: struct {
Challenge string `json:"challenge"`
ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
}{
Challenge: authMsg.Identity.Challenge,
ProverName: authMsg.Identity.ProverName,
ProverVersion: authMsg.Identity.ProverVersion,
HardForkName: authMsg.Identity.HardForkName,
},
Signature: authMsg.Signature,
}
// store JWT token for login requests
c.client.SetAuthToken(challengeResult.Data.Token)
var loginResult LoginResponse
loginResp, err := c.client.R().
SetHeader("Content-Type", "application/json").
SetBody(loginReq).
SetResult(&loginResult).
Post("/coordinator/v1/login")
if err != nil {
return fmt.Errorf("login failed: %w", err)
}
if loginResp.StatusCode() != 200 {
return fmt.Errorf("failed to login, status code: %v", loginResp.StatusCode())
}
if loginResult.ErrCode != types.Success {
return fmt.Errorf("failed to login, error code: %v, error message: %v", loginResult.ErrCode, loginResult.ErrMsg)
}
// store JWT token for future requests
c.client.SetAuthToken(loginResult.Data.Token)
return nil
}
// GetTask sends a request to the coordinator to get a prover task.
func (c *CoordinatorClient) GetTask(ctx context.Context, req *GetTaskRequest) (*GetTaskResponse, error) {
var result GetTaskResponse
resp, err := c.client.R().
SetHeader("Content-Type", "application/json").
SetBody(req).
SetResult(&result).
Post("/coordinator/v1/get_task")
if err != nil {
return nil, fmt.Errorf("request for GetTask failed: %w", err)
}
if resp.StatusCode() != 200 {
return nil, fmt.Errorf("failed to get task, status code: %v", resp.StatusCode())
}
if result.ErrCode == types.ErrJWTTokenExpired {
log.Info("JWT expired, attempting to re-login")
if err := c.Login(ctx); err != nil {
return nil, fmt.Errorf("JWT expired, re-login failed: %w", err)
}
log.Info("re-login success")
return c.GetTask(ctx, req)
}
if result.ErrCode != types.Success {
return nil, fmt.Errorf("error code: %v, error message: %v", result.ErrCode, result.ErrMsg)
}
return &result, nil
}
// SubmitProof sends a request to the coordinator to submit a proof.
func (c *CoordinatorClient) SubmitProof(ctx context.Context, req *SubmitProofRequest) error {
var result SubmitProofResponse
resp, err := c.client.R().
SetHeader("Content-Type", "application/json").
SetBody(req).
SetResult(&result).
Post("/coordinator/v1/submit_proof")
if err != nil {
log.Error("submit proof request failed", "error", err)
return fmt.Errorf("submit proof request failed: %w", ErrCoordinatorConnect)
}
if resp.StatusCode() != 200 {
log.Error("failed to submit proof", "status code", resp.StatusCode())
return fmt.Errorf("failed to submit proof, status code not 200: %w", ErrCoordinatorConnect)
}
if result.ErrCode == types.ErrJWTTokenExpired {
log.Info("JWT expired, attempting to re-login")
if err := c.Login(ctx); err != nil {
log.Error("JWT expired, re-login failed", "error", err)
return fmt.Errorf("JWT expired, re-login failed: %w", ErrCoordinatorConnect)
}
log.Info("re-login success")
return c.SubmitProof(ctx, req)
}
if result.ErrCode != types.Success {
return fmt.Errorf("error code: %v, error message: %v", result.ErrCode, result.ErrMsg)
}
return nil
}


@@ -1,77 +0,0 @@
package client
import (
"errors"
"scroll-tech/common/types/message"
)
// ErrCoordinatorConnect connect to coordinator error
var ErrCoordinatorConnect = errors.New("connect coordinator error")
// ChallengeResponse defines the response structure for random API
type ChallengeResponse struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data *struct {
Time string `json:"time"`
Token string `json:"token"`
} `json:"data,omitempty"`
}
// LoginRequest defines the request structure for login API
type LoginRequest struct {
Message struct {
Challenge string `json:"challenge"`
ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
} `json:"message"`
Signature string `json:"signature"`
}
// LoginResponse defines the response structure for login API
type LoginResponse struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data *struct {
Time string `json:"time"`
Token string `json:"token"`
} `json:"data"`
}
// GetTaskRequest defines the request structure for GetTask API
type GetTaskRequest struct {
TaskType message.ProofType `json:"task_type"`
ProverHeight uint64 `json:"prover_height,omitempty"`
VK string `json:"vk"`
}
// GetTaskResponse defines the response structure for GetTask API
type GetTaskResponse struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data *struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
TaskData string `json:"task_data"`
} `json:"data"`
}
// SubmitProofRequest defines the request structure for the SubmitProof API.
type SubmitProofRequest struct {
UUID string `json:"uuid"`
TaskID string `json:"task_id"`
TaskType int `json:"task_type"`
Status int `json:"status"`
Proof string `json:"proof"`
FailureType int `json:"failure_type,omitempty"`
FailureMsg string `json:"failure_msg,omitempty"`
}
// SubmitProofResponse defines the response structure for the SubmitProof API.
type SubmitProofResponse struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
}


@@ -1,77 +0,0 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/prover"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/prover/config"
)
var app *cli.App
func init() {
app = cli.NewApp()
app.Action = action
app.Name = "prover"
app.Usage = "The Scroll L2 Prover"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `prover-test` app for integration-test.
utils.RegisterSimulation(app, utils.ChunkProverApp)
utils.RegisterSimulation(app, utils.BatchProverApp)
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// Create prover
r, err := prover.NewProver(context.Background(), cfg)
if err != nil {
return err
}
// Start prover.
r.Start()
defer r.Stop()
log.Info(
"prover start successfully",
"name", cfg.ProverName, "type", cfg.Core.ProofType,
"publickey", r.PublicKey(), "version", version.Version,
)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run the prover cmd func.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -1,29 +0,0 @@
package app
import (
"fmt"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
"scroll-tech/common/version"
)
func TestRunChunkProver(t *testing.T) {
prover := cmd.NewCmd(string(utils.ChunkProverApp), "--version")
defer prover.WaitExit()
// wait result
prover.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("prover version %s", version.Version))
prover.RunApp(nil)
}
func TestRunBatchProver(t *testing.T) {
prover := cmd.NewCmd(string(utils.BatchProverApp), "--version")
defer prover.WaitExit()
// wait result
prover.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("prover version %s", version.Version))
prover.RunApp(nil)
}


@@ -1,128 +0,0 @@
package app
import (
"encoding/json"
"fmt"
"os"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/cmd"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/prover/config"
)
var (
proverIndex int
)
func getIndex() int {
defer func() { proverIndex++ }()
return proverIndex
}
// ProverApp prover-test client manager.
type ProverApp struct {
Config *config.Config
testApps *testcontainers.TestcontainerApps
originFile string
proverFile string
bboltDB string
index int
name string
args []string
*cmd.Cmd
}
// NewProverApp returns a new proverApp manager.
func NewProverApp(testApps *testcontainers.TestcontainerApps, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
var proofType message.ProofType
switch mockName {
case utils.ChunkProverApp:
proofType = message.ProofTypeChunk
case utils.BatchProverApp:
proofType = message.ProofTypeBatch
default:
return nil
}
name := string(mockName)
proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", testApps.Timestamp, name)
proverApp := &ProverApp{
testApps: testApps,
originFile: file,
proverFile: proverFile,
bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", testApps.Timestamp, name),
index: getIndex(),
name: name,
args: []string{"--log.debug", "--config", proverFile},
}
proverApp.Cmd = cmd.NewCmd(proverApp.name, proverApp.args...)
if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
panic(err)
}
return proverApp
}
// RunApp runs the prover-test child process with multiple parameters.
func (r *ProverApp) RunApp(t *testing.T) {
r.Cmd.RunApp(func() bool { return r.Cmd.WaitResult(t, time.Second*40, "prover start successfully") })
}
// Free stops and releases the prover-test.
func (r *ProverApp) Free() {
if !utils.IsNil(r.Cmd) {
r.Cmd.WaitExit()
}
_ = os.Remove(r.proverFile)
_ = os.Remove(r.Config.KeystorePath)
_ = os.Remove(r.bboltDB)
}
// MockConfig creates a new prover config.
func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.ProofType) error {
cfg, err := config.NewConfig(r.originFile)
if err != nil {
return err
}
cfg.ProverName = fmt.Sprintf("%s_%d", r.name, r.index)
cfg.KeystorePath = fmt.Sprintf("/tmp/%d_%s.json", r.testApps.Timestamp, cfg.ProverName)
endpoint, err := r.testApps.GetL2GethEndPoint()
if err != nil {
return err
}
cfg.L2Geth.Endpoint = endpoint
cfg.L2Geth.Confirmations = rpc.LatestBlockNumber
// Reuse l1geth's keystore file
cfg.KeystorePassword = "scrolltest"
cfg.DBPath = r.bboltDB
// Create keystore file.
_, err = utils.LoadOrCreateKey(cfg.KeystorePath, cfg.KeystorePassword)
if err != nil {
return err
}
cfg.Coordinator.BaseURL = httpURL
cfg.Coordinator.RetryCount = 10
cfg.Coordinator.RetryWaitTimeSec = 10
cfg.Coordinator.ConnectionTimeoutSec = 30
cfg.Core.ProofType = proofType
r.Config = cfg
if !store {
return nil
}
data, err := json.Marshal(r.Config)
if err != nil {
return err
}
return os.WriteFile(r.proverFile, data, 0600)
}


@@ -1,7 +0,0 @@
package main
import "scroll-tech/prover/cmd/app"
func main() {
app.Run()
}


@@ -1,13 +1,18 @@
{
"prover_name": "prover-1",
"hard_fork_name": "homestead",
"keystore_path": "keystore.json",
"keystore_password": "prover-pwd",
"db_path": "unique-db-path-for-prover-1",
"core": {
"proof_type": 2,
"low_version_circuit": {
"hard_fork_name": "bernoulli",
"params_path": "params",
"assets_path": "assets",
"proof_type": 2
"assets_path": "assets"
},
"high_version_circuit": {
"hard_fork_name": "curie",
"params_path": "params",
"assets_path": "assets"
},
"coordinator": {
"base_url": "http://localhost:8555",
@@ -16,7 +21,6 @@
"connection_timeout_sec": 30
},
"l2geth": {
"endpoint": "http://localhost:9999",
"confirmations": "0x1"
"endpoint": "http://localhost:9999"
}
}


@@ -1,66 +0,0 @@
package config
import (
"encoding/json"
"os"
"path/filepath"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/types/message"
)
// Config loads prover configuration items.
type Config struct {
ProverName string `json:"prover_name"`
HardForkName string `json:"hard_fork_name"`
KeystorePath string `json:"keystore_path"`
KeystorePassword string `json:"keystore_password"`
Core *ProverCoreConfig `json:"core"`
DBPath string `json:"db_path"`
Coordinator *CoordinatorConfig `json:"coordinator"`
L2Geth *L2GethConfig `json:"l2geth,omitempty"` // only for chunk_prover
}
// ProverCoreConfig load zk prover config.
type ProverCoreConfig struct {
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
ProofType message.ProofType `json:"proof_type,omitempty"` // 1: chunk prover (default type), 2: batch prover
DumpDir string `json:"dump_dir,omitempty"`
}
// CoordinatorConfig represents the configuration for the Coordinator client.
type CoordinatorConfig struct {
BaseURL string `json:"base_url"`
RetryCount int `json:"retry_count"`
RetryWaitTimeSec int `json:"retry_wait_time_sec"`
ConnectionTimeoutSec int `json:"connection_timeout_sec"`
}
// L2GethConfig represents the configuration for the l2geth client.
type L2GethConfig struct {
Endpoint string `json:"endpoint"`
Confirmations rpc.BlockNumber `json:"confirmations"`
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &Config{}
if err = json.Unmarshal(buf, cfg); err != nil {
return nil, err
}
if !filepath.IsAbs(cfg.DBPath) {
if cfg.DBPath, err = filepath.Abs(cfg.DBPath); err != nil {
log.Error("Failed to get abs path", "error", err)
return nil, err
}
}
return cfg, nil
}


@@ -1,45 +0,0 @@
//go:build mock_prover
package core
import (
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"scroll-tech/common/types/message"
"scroll-tech/prover/config"
)
// ProverCore sends block-traces to the rust-prover through a socket and gets back the zk-proof.
type ProverCore struct {
cfg *config.ProverCoreConfig
VK string
}
// NewProverCore inits a ProverCore object.
func NewProverCore(cfg *config.ProverCoreConfig) (*ProverCore, error) {
return &ProverCore{cfg: cfg}, nil
}
func (p *ProverCore) ProveChunk(taskID string, traces []*types.BlockTrace) (*message.ChunkProof, error) {
_empty := common.BigToHash(big.NewInt(0))
return &message.ChunkProof{
StorageTrace: _empty[:],
Protocol: _empty[:],
Proof: _empty[:],
Instances: _empty[:],
Vk: _empty[:],
}, nil
}
func (p *ProverCore) ProveBatch(taskID string, chunkInfos []*message.ChunkInfo, chunkProofs []*message.ChunkProof) (*message.BatchProof, error) {
_empty := common.BigToHash(big.NewInt(0))
return &message.BatchProof{
Proof: _empty[:],
Instances: _empty[:],
Vk: _empty[:],
}, nil
}


@@ -1,258 +0,0 @@
//go:build !mock_prover
package core
/*
#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/
import "C" //nolint:typecheck
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"unsafe"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types/message"
"scroll-tech/prover/config"
)
// ProverCore sends block-traces to the rust-prover through ffi and gets back the zk-proof.
type ProverCore struct {
cfg *config.ProverCoreConfig
VK string
}

// NewProverCore initializes a ProverCore object: it loads the params and
// assets, initializes the underlying Rust prover, and records its verifying key.
func NewProverCore(cfg *config.ProverCoreConfig) (*ProverCore, error) {
    paramsPathStr := C.CString(cfg.ParamsPath)
    assetsPathStr := C.CString(cfg.AssetsPath)
    defer func() {
        C.free(unsafe.Pointer(paramsPathStr))
        C.free(unsafe.Pointer(assetsPathStr))
    }()

    var vk string
    var rawVK *C.char
    if cfg.ProofType == message.ProofTypeBatch {
        C.init_batch_prover(paramsPathStr, assetsPathStr)
        rawVK = C.get_batch_vk()
    } else if cfg.ProofType == message.ProofTypeChunk {
        C.init_chunk_prover(paramsPathStr, assetsPathStr)
        rawVK = C.get_chunk_vk()
    }
    defer C.free_c_chars(rawVK)
    if rawVK != nil {
        vk = C.GoString(rawVK)
    }

    if cfg.DumpDir != "" {
        err := os.MkdirAll(cfg.DumpDir, os.ModePerm)
        if err != nil {
            return nil, err
        }
        log.Info("Enabled dump_proof", "dir", cfg.DumpDir)
    }
    return &ProverCore{cfg: cfg, VK: vk}, nil
}

// ProveBatch calls the Rust FFI to generate a batch proof.
func (p *ProverCore) ProveBatch(taskID string, chunkInfos []*message.ChunkInfo, chunkProofs []*message.ChunkProof) (*message.BatchProof, error) {
    if p.cfg.ProofType != message.ProofTypeBatch {
        return nil, fmt.Errorf("prover is not a batch-prover (type: %v), but is trying to prove a batch", p.cfg.ProofType)
    }

    chunkInfosByt, err := json.Marshal(chunkInfos)
    if err != nil {
        return nil, err
    }
    chunkProofsByt, err := json.Marshal(chunkProofs)
    if err != nil {
        return nil, err
    }

    isValid, err := p.checkChunkProofs(chunkProofsByt)
    if err != nil {
        return nil, err
    }
    if !isValid {
        return nil, fmt.Errorf("chunk proofs do not match the chunk protocol, task-id: %s", taskID)
    }

    proofByt, err := p.proveBatch(chunkInfosByt, chunkProofsByt)
    if err != nil {
        return nil, fmt.Errorf("failed to generate batch proof: %v", err)
    }

    err = p.mayDumpProof(taskID, proofByt)
    if err != nil {
        log.Error("Dump batch proof failed", "task-id", taskID, "error", err)
    }

    zkProof := &message.BatchProof{}
    return zkProof, json.Unmarshal(proofByt, zkProof)
}

// ProveChunk calls the Rust FFI to generate a chunk proof.
func (p *ProverCore) ProveChunk(taskID string, traces []*types.BlockTrace) (*message.ChunkProof, error) {
    if p.cfg.ProofType != message.ProofTypeChunk {
        return nil, fmt.Errorf("prover is not a chunk-prover (type: %v), but is trying to prove a chunk", p.cfg.ProofType)
    }

    tracesByt, err := json.Marshal(traces)
    if err != nil {
        return nil, err
    }
    proofByt, err := p.proveChunk(tracesByt)
    if err != nil {
        return nil, err
    }

    err = p.mayDumpProof(taskID, proofByt)
    if err != nil {
        log.Error("Dump chunk proof failed", "task-id", taskID, "error", err)
    }

    zkProof := &message.ChunkProof{}
    return zkProof, json.Unmarshal(proofByt, zkProof)
}

// TracesToChunkInfo converts block traces into a chunk info object.
func (p *ProverCore) TracesToChunkInfo(traces []*types.BlockTrace) (*message.ChunkInfo, error) {
    tracesByt, err := json.Marshal(traces)
    if err != nil {
        return nil, err
    }
    chunkInfoByt := p.tracesToChunkInfo(tracesByt)
    chunkInfo := &message.ChunkInfo{}
    return chunkInfo, json.Unmarshal(chunkInfoByt, chunkInfo)
}

// CheckChunkProofsResponse represents the result of a chunk proof checking operation.
// Ok indicates whether the proof checking was successful.
// Error provides additional details in case the check failed.
type CheckChunkProofsResponse struct {
    Ok    bool   `json:"ok"`
    Error string `json:"error,omitempty"`
}

// ProofResult encapsulates the result from generating a proof.
// Message holds the generated proof in byte slice format.
// Error provides additional details in case the proof generation failed.
type ProofResult struct {
    Message []byte `json:"message,omitempty"`
    Error   string `json:"error,omitempty"`
}

func (p *ProverCore) checkChunkProofs(chunkProofsByt []byte) (bool, error) {
    chunkProofsStr := C.CString(string(chunkProofsByt))
    defer C.free(unsafe.Pointer(chunkProofsStr))

    log.Info("Start to check chunk proofs ...")
    cResult := C.check_chunk_proofs(chunkProofsStr)
    defer C.free_c_chars(cResult)
    log.Info("Finish checking chunk proofs!")

    var result CheckChunkProofsResponse
    err := json.Unmarshal([]byte(C.GoString(cResult)), &result)
    if err != nil {
        return false, fmt.Errorf("failed to parse check chunk proofs result: %v", err)
    }
    if result.Error != "" {
        return false, fmt.Errorf("failed to check chunk proofs: %s", result.Error)
    }
    return result.Ok, nil
}

func (p *ProverCore) proveBatch(chunkInfosByt []byte, chunkProofsByt []byte) ([]byte, error) {
    chunkInfosStr := C.CString(string(chunkInfosByt))
    chunkProofsStr := C.CString(string(chunkProofsByt))
    defer func() {
        C.free(unsafe.Pointer(chunkInfosStr))
        C.free(unsafe.Pointer(chunkProofsStr))
    }()

    log.Info("Start to create batch proof ...")
    bResult := C.gen_batch_proof(chunkInfosStr, chunkProofsStr)
    defer C.free_c_chars(bResult)
    log.Info("Finish creating batch proof!")

    var result ProofResult
    err := json.Unmarshal([]byte(C.GoString(bResult)), &result)
    if err != nil {
        return nil, fmt.Errorf("failed to parse batch proof result: %v", err)
    }
    if result.Error != "" {
        return nil, fmt.Errorf("failed to generate batch proof: %s", result.Error)
    }
    return result.Message, nil
}

func (p *ProverCore) proveChunk(tracesByt []byte) ([]byte, error) {
    tracesStr := C.CString(string(tracesByt))
    defer C.free(unsafe.Pointer(tracesStr))

    log.Info("Start to create chunk proof ...")
    cProof := C.gen_chunk_proof(tracesStr)
    defer C.free_c_chars(cProof)
    log.Info("Finish creating chunk proof!")

    var result ProofResult
    err := json.Unmarshal([]byte(C.GoString(cProof)), &result)
    if err != nil {
        return nil, fmt.Errorf("failed to parse chunk proof result: %v", err)
    }
    if result.Error != "" {
        return nil, fmt.Errorf("failed to generate chunk proof: %s", result.Error)
    }
    return result.Message, nil
}

func (p *ProverCore) mayDumpProof(id string, proofByt []byte) error {
    if p.cfg.DumpDir == "" {
        return nil
    }
    path := filepath.Join(p.cfg.DumpDir, id)
    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer func() {
        if err = f.Close(); err != nil {
            log.Error("failed to close proof dump file", "id", id, "error", err)
        }
    }()
    log.Info("Saving proof", "task-id", id)
    _, err = f.Write(proofByt)
    return err
}

func (p *ProverCore) tracesToChunkInfo(tracesByt []byte) []byte {
    tracesStr := C.CString(string(tracesByt))
    defer C.free(unsafe.Pointer(tracesStr))
    cChunkInfo := C.block_traces_to_chunk_info(tracesStr)
    defer C.free_c_chars(cChunkInfo)
    chunkInfo := C.GoString(cChunkInfo)
    return []byte(chunkInfo)
}
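All of the FFI helpers above decode a small JSON envelope (CheckChunkProofsResponse or ProofResult) from the C string returned by libzkp, so failures inside the Rust library surface as data rather than panics. A self-contained sketch of just that decoding step, using made-up payloads in place of C.GoString(...) output:

package main

import (
    "encoding/json"
    "fmt"
)

// Local mirrors of the envelope types defined above.
type CheckChunkProofsResponse struct {
    Ok    bool   `json:"ok"`
    Error string `json:"error,omitempty"`
}

type ProofResult struct {
    Message []byte `json:"message,omitempty"`
    Error   string `json:"error,omitempty"`
}

func main() {
    // Hypothetical success payload from check_chunk_proofs.
    var check CheckChunkProofsResponse
    if err := json.Unmarshal([]byte(`{"ok":true}`), &check); err != nil {
        panic(err)
    }
    fmt.Println("chunk proofs match protocol:", check.Ok)

    // Hypothetical failure payload from gen_batch_proof / gen_chunk_proof.
    var res ProofResult
    if err := json.Unmarshal([]byte(`{"error":"params file not found"}`), &res); err != nil {
        panic(err)
    }
    if res.Error != "" {
        fmt.Println("proof generation failed inside libzkp:", res.Error)
    }
}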


@@ -1,154 +0,0 @@
//go:build ffi

// go test -v -race -gcflags="-l" -ldflags="-s=false" -tags ffi ./...

package core_test

import (
    "encoding/base64"
    "encoding/json"
    "flag"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "sort"
    "testing"

    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/stretchr/testify/assert"

    "scroll-tech/common/types/message"
    "scroll-tech/prover/config"
    "scroll-tech/prover/core"
)

var (
    paramsPath    = flag.String("params", "/assets/test_params", "params dir")
    assetsPath    = flag.String("assets", "/assets/test_assets", "assets dir")
    proofDumpPath = flag.String("dump", "/assets/proof_data", "the directory proofs are dumped to")
    batchDirPath  = flag.String("batch-dir", "/assets/traces/batch_24", "batch directory")
    batchVkPath   = flag.String("batch-vk", "/assets/test_assets/agg_vk.vkey", "batch vk")
    chunkVkPath   = flag.String("chunk-vk", "/assets/test_assets/chunk_vk.vkey", "chunk vk")
)

func TestFFI(t *testing.T) {
    as := assert.New(t)

    chunkProverConfig := &config.ProverCoreConfig{
        DumpDir:    *proofDumpPath,
        ParamsPath: *paramsPath,
        AssetsPath: *assetsPath,
        ProofType:  message.ProofTypeChunk,
    }
    chunkProverCore, err := core.NewProverCore(chunkProverConfig)
    as.NoError(err)
    t.Log("Constructed chunk prover")
    as.Equal(chunkProverCore.VK, readVk(*chunkVkPath, as))
    t.Log("Chunk VK must be available at init")

    // Get the list of subdirectories (chunks)
    chunkDirs, err := os.ReadDir(*batchDirPath)
    as.NoError(err)
    sort.Slice(chunkDirs, func(i, j int) bool {
        return chunkDirs[i].Name() < chunkDirs[j].Name()
    })

    chunkInfos := make([]*message.ChunkInfo, 0, len(chunkDirs))
    chunkProofs := make([]*message.ChunkProof, 0, len(chunkDirs))
    for i, dir := range chunkDirs {
        if dir.IsDir() {
            chunkPath := filepath.Join(*batchDirPath, dir.Name())
            chunkTrace := readChunkTrace(chunkPath, as)
            t.Logf("Loaded chunk trace %d", i+1)

            chunkInfo, err := chunkProverCore.TracesToChunkInfo(chunkTrace)
            as.NoError(err)
            chunkInfos = append(chunkInfos, chunkInfo)
            t.Logf("Converted to chunk info %d", i+1)

            chunkProof, err := chunkProverCore.ProveChunk(fmt.Sprintf("chunk_proof%d", i+1), chunkTrace)
            as.NoError(err)
            chunkProofs = append(chunkProofs, chunkProof)
            t.Logf("Generated and dumped chunk proof %d", i+1)
        }
    }
    as.Equal(chunkProverCore.VK, readVk(*chunkVkPath, as))
    t.Log("Chunk VKs must be equal after proving")

    batchProverConfig := &config.ProverCoreConfig{
        DumpDir:    *proofDumpPath,
        ParamsPath: *paramsPath,
        AssetsPath: *assetsPath,
        ProofType:  message.ProofTypeBatch,
    }
    batchProverCore, err := core.NewProverCore(batchProverConfig)
    as.NoError(err)
    as.Equal(batchProverCore.VK, readVk(*batchVkPath, as))
    t.Log("Batch VK must be available at init")

    _, err = batchProverCore.ProveBatch("batch_proof", chunkInfos, chunkProofs)
    as.NoError(err)
    t.Log("Generated and dumped batch proof")
    as.Equal(batchProverCore.VK, readVk(*batchVkPath, as))
    t.Log("Batch VKs must be equal after proving")
}

func readChunkTrace(filePath string, as *assert.Assertions) []*types.BlockTrace {
    fileInfo, err := os.Stat(filePath)
    as.NoError(err)

    var traces []*types.BlockTrace
    readFile := func(path string) {
        f, err := os.Open(path)
        as.NoError(err)
        defer func() {
            as.NoError(f.Close())
        }()
        byt, err := io.ReadAll(f)
        as.NoError(err)
        trace := &types.BlockTrace{}
        as.NoError(json.Unmarshal(byt, trace))
        traces = append(traces, trace)
    }

    if fileInfo.IsDir() {
        files, err := os.ReadDir(filePath)
        as.NoError(err)
        // Sort files alphabetically
        sort.Slice(files, func(i, j int) bool {
            return files[i].Name() < files[j].Name()
        })
        for _, file := range files {
            if !file.IsDir() {
                readFile(filepath.Join(filePath, file.Name()))
            }
        }
    } else {
        readFile(filePath)
    }
    return traces
}

func readVk(filePath string, as *assert.Assertions) string {
    f, err := os.Open(filePath)
    as.NoError(err)
    defer func() {
        as.NoError(f.Close())
    }()
    byt, err := io.ReadAll(f)
    as.NoError(err)
    return base64.StdEncoding.EncodeToString(byt)
}
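As the comment at the top of the file indicates, this test only compiles under the ffi build tag, and the flags above point it at the params, assets, and trace fixtures. A typical invocation (the package path and fixture paths are placeholders; adjust them to wherever the prover core package and test assets live) would look something like:

go test -v -race -gcflags="-l" -ldflags="-s=false" -tags ffi ./core -params /assets/test_params -assets /assets/test_assets -batch-dir /assets/traces/batch_24 -dump /assets/proof_data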

Some files were not shown because too many files have changed in this diff.