Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits: 100 commits, v4.4.46...bump-versi
| SHA1 |
|---|
| b55c858980 |
| 5d6b5a89f4 |
| 4ee459a602 |
| 276385fd0a |
| 82fb15de3b |
| 5204ad50e0 |
| f824fb0efc |
| a55c7bdc77 |
| 47b1a037a9 |
| ae34020c34 |
| fa9fab6e98 |
| c4f869a33a |
| 0cee9a51e6 |
| 97de988228 |
| a12175dafc |
| fedfa04c2b |
| 0d8b00c3de |
| 826357ab5d |
| d26381cba3 |
| 0e65686ce4 |
| 6dd878eaca |
| a18fe06440 |
| 3ac69bec51 |
| e80f030246 |
| a77f1413ee |
| 5b62692098 |
| 4f34e90f00 |
| 6b1b822c81 |
| a34c01d90b |
| 0578aab3ae |
| 55dfbf6735 |
| f3ddf43439 |
| a2582dcc3f |
| 0a1868cec1 |
| fcfbc53252 |
| 13c8605211 |
| 228cba48b7 |
| b7c5ba9046 |
| 5f7973528e |
| 69a7339bab |
| 056ebdca1c |
| 7a386ad807 |
| 5fb93c4dc3 |
| a5e2d71ebd |
| 6b837c01f3 |
| 5b6b145753 |
| c07975acdf |
| dfdb2ecf07 |
| a6f2457040 |
| fa0927c5dc |
| f92029aaeb |
| 45b23edde9 |
| 33b1b3cb51 |
| 51c930d7da |
| 4cfc5511fb |
| 06beb5dca3 |
| 968a396b5e |
| fa2401c081 |
| 438a9fb1d6 |
| 1c22307f08 |
| 22dd3901f0 |
| 54d823677f |
| e3cf2cb82b |
| b6025425ac |
| 3ab5752276 |
| c4ba0f9178 |
| f0e8fbe738 |
| 2059b49624 |
| bc8f9dbc83 |
| cc2441d42d |
| 5d965d49db |
| 233fff0333 |
| 41ce22be05 |
| 3353e36d16 |
| f2a656d67b |
| d6b9176ef2 |
| 09f087ab16 |
| e3c87e6547 |
| a1d1cbc2e5 |
| 404a625cb4 |
| 736d850be1 |
| 246bf38e69 |
| bce33834ab |
| ae8c858a07 |
| 2ee1c898f0 |
| a528103260 |
| 7b00055a5d |
| bf48417433 |
| 5c9243e03f |
| fcfd97ab6c |
| e453c23b16 |
| 432c98b651 |
| 865d65ac3a |
| 7f86aecefd |
| 4470c814c3 |
| 31a6141fc6 |
| ed4bda601f |
| cd99f380ce |
| a8663eb447 |
| fa80b3e4a8 |
.github/workflows/common.yml (vendored, 12 changes)

@@ -29,7 +29,7 @@ jobs:
    steps:
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly-2023-12-03
          toolchain: nightly-2024-12-06
          override: true
          components: rustfmt, clippy
      - name: Install Go
@@ -42,11 +42,11 @@ jobs:
        uses: Swatinem/rust-cache@v2
        with:
          workspaces: "common/libzkp/impl -> target"
      - name: Lint
        working-directory: 'common'
        run: |
          rm -rf $HOME/.cache/golangci-lint
          make lint
      # - name: Lint
      # working-directory: 'common'
      # run: |
      # rm -rf $HOME/.cache/golangci-lint
      # make lint
  goimports-lint:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
.github/workflows/docker.yml (vendored, 119 changes)

@@ -49,8 +49,8 @@ jobs:
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -94,8 +94,53 @@ jobs:
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

  blob_uploader:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2
      - name: check repo and create it if not exist
        env:
          REPOSITORY: blob-uploader
        run: |
          aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
      - name: Build and push
        uses: docker/build-push-action@v3
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          REPOSITORY: blob-uploader
          IMAGE_TAG: ${{ github.ref_name }}
        with:
          context: .
          file: ./build/dockerfiles/blob_uploader.Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -139,8 +184,8 @@ jobs:
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -184,8 +229,8 @@ jobs:
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -229,8 +274,8 @@ jobs:
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -274,8 +319,8 @@ jobs:
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -307,6 +352,48 @@ jobs:
          REPOSITORY: coordinator-api
        run: |
          aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
      - name: Setup SSH for repositories and clone them
        run: |
          mkdir -p ~/.ssh
          chmod 700 ~/.ssh

          # Setup for plonky3-gpu
          echo "${{ secrets.PLONKY3_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/plonky3_gpu_key
          chmod 600 ~/.ssh/plonky3_gpu_key
          eval "$(ssh-agent -s)" > /dev/null
          ssh-add ~/.ssh/plonky3_gpu_key 2>/dev/null
          ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts 2>/dev/null
          echo "Loaded plonky3-gpu key"

          # Clone plonky3-gpu repository
          ./build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh

          # Setup for openvm-stark-gpu
          echo "${{ secrets.OPENVM_STARK_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_stark_gpu_key
          chmod 600 ~/.ssh/openvm_stark_gpu_key
          eval "$(ssh-agent -s)" > /dev/null
          ssh-add ~/.ssh/openvm_stark_gpu_key 2>/dev/null
          echo "Loaded openvm-stark-gpu key"

          # Clone openvm-stark-gpu repository
          ./build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh

          # Setup for openvm-gpu
          echo "${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_gpu_key
          chmod 600 ~/.ssh/openvm_gpu_key
          eval "$(ssh-agent -s)" > /dev/null
          ssh-add ~/.ssh/openvm_gpu_key 2>/dev/null
          echo "Loaded openvm-gpu key"

          # Clone openvm-gpu repository
          ./build/dockerfiles/coordinator-api/clone_openvm_gpu.sh

          # Show number of loaded keys
          echo "Number of loaded keys: $(ssh-add -l | wc -l)"

      - name: Checkout specific commits
        run: |
          ./build/dockerfiles/coordinator-api/checkout_all.sh
      - name: Build and push
        uses: docker/build-push-action@v3
        env:
@@ -318,8 +405,8 @@ jobs:
          file: ./build/dockerfiles/coordinator-api.Dockerfile
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

@@ -363,7 +450,7 @@ jobs:
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
.github/workflows/intermediate-docker.yml (vendored, 6 changes)

@@ -9,9 +9,13 @@ on:
        type: choice
        options:
          - "1.20"
          - "1.20.14"
          - "1.21"
          - "1.21.13"
          - "1.22"
          - "1.22.12"
          - "1.23"
          - "1.23.7"
        default: "1.21"
      RUST_VERSION:
        description: "Rust toolchain version"
@@ -20,6 +24,7 @@ on:
        options:
          - nightly-2023-12-03
          - nightly-2022-12-10
          - 1.86.0
        default: "nightly-2023-12-03"
      PYTHON_VERSION:
        description: "Python version"
@@ -43,6 +48,7 @@ on:
        type: choice
        options:
          - 0.1.41
          - 0.1.71
      BASE_IMAGE:
        description: "which intermediate image you want to update"
        required: true
.github/workflows/prover.yml (vendored, 99 changes)
@@ -1,99 +0,0 @@
|
||||
name: Prover
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- staging
|
||||
- develop
|
||||
- alpha
|
||||
paths:
|
||||
- 'prover/**'
|
||||
- '.github/workflows/prover.yml'
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
- reopened
|
||||
- synchronize
|
||||
- ready_for_review
|
||||
paths:
|
||||
- 'prover/**'
|
||||
- '.github/workflows/prover.yml'
|
||||
|
||||
defaults:
|
||||
run:
|
||||
working-directory: 'prover'
|
||||
|
||||
jobs:
|
||||
skip_check:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should_skip: ${{ steps.skip_check.outputs.should_skip }}
|
||||
steps:
|
||||
- id: skip_check
|
||||
uses: fkirc/skip-duplicate-actions@v5
|
||||
with:
|
||||
cancel_others: 'true'
|
||||
concurrent_skipping: 'same_content_newer'
|
||||
paths_ignore: '["**/README.md"]'
|
||||
|
||||
fmt:
|
||||
needs: [skip_check]
|
||||
if: |
|
||||
github.event.pull_request.draft == false &&
|
||||
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: nightly-2023-12-03
|
||||
components: rustfmt
|
||||
- name: Cargo cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
workspaces: "prover -> target"
|
||||
- name: Cargo check
|
||||
run: cargo check --all-features
|
||||
- name: Cargo fmt
|
||||
run: cargo fmt --all -- --check
|
||||
|
||||
clippy:
|
||||
needs: [skip_check, fmt]
|
||||
if: |
|
||||
github.event.pull_request.draft == false &&
|
||||
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: nightly-2023-12-03
|
||||
components: clippy
|
||||
- name: Cargo cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
workspaces: "prover -> target"
|
||||
- name: Run clippy
|
||||
run: cargo clippy --all-features --all-targets -- -D warnings
|
||||
|
||||
compile:
|
||||
needs: [skip_check, clippy]
|
||||
if: |
|
||||
github.event.pull_request.draft == false &&
|
||||
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@master
|
||||
with:
|
||||
toolchain: nightly-2023-12-03
|
||||
- name: Cache cargo
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
workspaces: "prover -> target"
|
||||
- name: Test
|
||||
run: |
|
||||
make prover
|
||||
@@ -11,7 +11,7 @@ Please note that this project is released with a [Contributor Code of Conduct][c

## Contribute to Scroll

Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.

## Issues and PRs

Makefile (30 changes)

@@ -1,6 +1,6 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update

L2GETH_TAG=scroll-v5.5.1
L2GETH_TAG=scroll-v5.8.23

help: ## Display this help message
	@grep -h \
@@ -8,12 +8,12 @@ help: ## Display this help message
	awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
update: ## Update dependencies
	go work sync
	cd $(PWD)/bridge-history-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/bridge-history-api/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/common/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
	cd $(PWD)/coordinator/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/database/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/rollup/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/tests/integration-test/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy

lint: ## The code's format and security checks
	make -C rollup lint
@@ -31,17 +31,17 @@ fmt: ## Format the code
	cd $(PWD)/rollup/ && go mod tidy
	cd $(PWD)/tests/integration-test/ && go mod tidy

	goimports -local $(PWD)/bridge-history-api/ -w .
	goimports -local $(PWD)/common/ -w .
	goimports -local $(PWD)/coordinator/ -w .
	goimports -local $(PWD)/database/ -w .
	goimports -local $(PWD)/rollup/ -w .
	goimports -local $(PWD)/tests/integration-test/ -w .
	goimports -local scroll-tech/bridge-history-api/ -w .
	goimports -local scroll-tech/common/ -w .
	goimports -local scroll-tech/coordinator/ -w .
	goimports -local scroll-tech/database/ -w .
	goimports -local scroll-tech/rollup/ -w .
	goimports -local scroll-tech/tests/integration-test/ -w .

dev_docker: ## Build docker images for development/testing usages
	docker pull postgres
	docker build -t scroll_l1geth ./common/testcontainers/docker/l1geth/
	docker build -t scroll_l2geth ./common/testcontainers/docker/l2geth/
	docker build -t scroll_l1geth --platform linux/amd64 ./common/testcontainers/docker/l1geth/
	docker build -t scroll_l2geth --platform linux/amd64 ./common/testcontainers/docker/l2geth/

clean: ## Empty out the bin folder
	@rm -rf build/bin

@@ -37,6 +37,6 @@ reset-env:
	go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli && $(PWD)/build/bin/bridgehistoryapi-db-cli reset

bridgehistoryapi-docker:
	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile
	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile --platform=linux/amd64
	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile --platform=linux/amd64
	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile --platform=linux/amd64
File diff suppressed because one or more lines are too long
bridge-history-api/abi/backend_abi_test.go (new file, 13 lines)

@@ -0,0 +1,13 @@
package backendabi

import (
	"testing"

	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/stretchr/testify/assert"
)

func TestEventSignatures(t *testing.T) {
	assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), L1RevertBatchV0EventSig)
	assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,uint256)")), L1RevertBatchV7EventSig)
}
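The expected topic constants checked in this test are simply the Keccak-256 hashes of the two RevertBatch event declarations (the pre-CodecV7 form carries a batch hash, the CodecV7+ form carries a start and finish batch index). The following standalone Go sketch, shown only for illustration and not part of the diff, derives both topics with the same crypto package:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// Topic of the pre-CodecV7 event: RevertBatch(uint256 batchIndex, bytes32 batchHash)
	v0 := crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)"))
	// Topic of the CodecV7+ event: RevertBatch(uint256 startBatchIndex, uint256 finishBatchIndex)
	v7 := crypto.Keccak256Hash([]byte("RevertBatch(uint256,uint256)"))
	fmt.Println("V0 topic:", v0.Hex())
	fmt.Println("V7 topic:", v7.Hex())
}
```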
@@ -68,7 +68,10 @@ func action(ctx *cli.Context) error {

	observability.Server(ctx, db)

	l1MessageFetcher := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
	l1MessageFetcher, err := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
	if err != nil {
		log.Crit("failed to create L1MessageFetcher", "err", err)
	}
	go l1MessageFetcher.Start()

	l2MessageFetcher := fetcher.NewL2MessageFetcher(subCtx, cfg.L2, db, l2Client)
@@ -19,9 +19,11 @@
    "ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
    "GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
    "MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
    "MessageQueueV2Addr": "0x0000000000000000000000000000000000000000",
    "BatchBridgeGatewayAddr": "0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4",
    "GasTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
    "WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000"
    "WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
    "BlobScanAPIEndpoint": "https://api.blobscan.com/blobs/"
  },
  "L2": {
    "confirmation": 0,
@@ -1,6 +1,8 @@
module scroll-tech/bridge-history-api

go 1.21
go 1.22

toolchain go1.22.2

require (
	github.com/gin-contrib/cors v1.5.0
@@ -8,41 +10,43 @@ require (
|
||||
github.com/go-redis/redis/v8 v8.11.5
|
||||
github.com/pressly/goose/v3 v3.16.0
|
||||
github.com/prometheus/client_golang v1.19.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
golang.org/x/sync v0.7.0
|
||||
golang.org/x/sync v0.11.0
|
||||
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
|
||||
)
|
||||
|
||||
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.0 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.13.0 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.20.0 // indirect
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
github.com/bytedance/sonic v1.10.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
|
||||
github.com/chenzhuoyu/iasm v0.9.0 // indirect
|
||||
github.com/consensys/bavard v0.1.13 // indirect
|
||||
github.com/consensys/gnark-crypto v0.12.1 // indirect
|
||||
github.com/consensys/gnark-crypto v0.13.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
|
||||
github.com/docker/docker v26.1.0+incompatible // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
|
||||
github.com/fjl/memsize v0.0.2 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-kit/kit v0.9.0 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
@@ -55,15 +59,15 @@ require (
|
||||
github.com/hashicorp/go-bexpr v0.1.10 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
|
||||
github.com/holiman/uint256 v1.2.4 // indirect
|
||||
github.com/holiman/uint256 v1.3.2 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.16 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
|
||||
github.com/jackc/pgx/v5 v5.5.4 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.4 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
@@ -89,15 +93,15 @@ require (
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb // indirect
|
||||
github.com/scroll-tech/zktrie v0.8.4 // indirect
|
||||
github.com/sethvargo/go-retry v0.2.4 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/status-im/keycard-go v0.2.0 // indirect
|
||||
github.com/supranational/blst v0.3.12 // indirect
|
||||
github.com/supranational/blst v0.3.13 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.14 // indirect
|
||||
github.com/tklauser/numcpus v0.8.0 // indirect
|
||||
github.com/tklauser/numcpus v0.9.0 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
@@ -108,8 +112,8 @@ require (
|
||||
golang.org/x/arch v0.5.0 // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/net v0.25.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/sys v0.30.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
|
||||
@@ -11,9 +11,11 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
@@ -23,8 +25,8 @@ github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
|
||||
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
|
||||
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
@@ -53,20 +55,20 @@ github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo
|
||||
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
|
||||
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
|
||||
github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc=
|
||||
github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
|
||||
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
|
||||
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
@@ -86,8 +88,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
|
||||
github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
|
||||
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
|
||||
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
|
||||
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
@@ -108,9 +110,8 @@ github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
|
||||
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
|
||||
github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
|
||||
github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
|
||||
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
@@ -170,13 +171,13 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
|
||||
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
|
||||
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
|
||||
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
|
||||
@@ -202,8 +203,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
||||
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
@@ -302,16 +303,16 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54 h1:qVpsVu1J91opTn6HYeuzWcBRVhQmPR8g05i+PlOjlI4=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
|
||||
@@ -324,6 +325,8 @@ github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5g
|
||||
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
|
||||
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
|
||||
@@ -340,14 +343,14 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8=
|
||||
github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
|
||||
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
|
||||
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
|
||||
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
|
||||
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
|
||||
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
|
||||
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
@@ -397,8 +400,8 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -415,13 +418,14 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
||||
@@ -30,9 +30,14 @@ type FetcherConfig struct {
	ScrollChainAddr string `json:"ScrollChainAddr"`
	GatewayRouterAddr string `json:"GatewayRouterAddr"`
	MessageQueueAddr string `json:"MessageQueueAddr"`
	MessageQueueV2Addr string `json:"MessageQueueV2Addr"`
	BatchBridgeGatewayAddr string `json:"BatchBridgeGatewayAddr"`
	GasTokenGatewayAddr string `json:"GasTokenGatewayAddr"`
	WrappedTokenGatewayAddr string `json:"WrappedTokenGatewayAddr"`

	BeaconNodeAPIEndpoint string `json:"BeaconNodeAPIEndpoint"`
	BlobScanAPIEndpoint string `json:"BlobScanAPIEndpoint"`
	BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
}

// RedisConfig redis config
@@ -2,6 +2,7 @@ package fetcher

import (
	"context"
	"fmt"
	"math/big"
	"time"

@@ -10,6 +11,7 @@ import (
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
	"gorm.io/gorm"

	"scroll-tech/bridge-history-api/internal/config"
@@ -35,13 +37,32 @@ type L1MessageFetcher struct {
}

// NewL1MessageFetcher creates a new L1MessageFetcher instance.
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
	blobClient := blob_client.NewBlobClients()
	if cfg.BeaconNodeAPIEndpoint != "" {
		beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
		if err != nil {
			log.Warn("failed to create BeaconNodeClient", "err", err)
		} else {
			blobClient.AddBlobClient(beaconNodeClient)
		}
	}
	if cfg.BlobScanAPIEndpoint != "" {
		blobClient.AddBlobClient(blob_client.NewBlobScanClient(cfg.BlobScanAPIEndpoint))
	}
	if cfg.BlockNativeAPIEndpoint != "" {
		blobClient.AddBlobClient(blob_client.NewBlockNativeClient(cfg.BlockNativeAPIEndpoint))
	}
	if blobClient.Size() == 0 {
		return nil, fmt.Errorf("no blob client is configured")
	}

	c := &L1MessageFetcher{
		ctx: ctx,
		cfg: cfg,
		client: client,
		eventUpdateLogic: logic.NewEventUpdateLogic(db, true),
		l1FetcherLogic: logic.NewL1FetcherLogic(cfg, db, client),
		l1FetcherLogic: logic.NewL1FetcherLogic(cfg, db, client, blobClient),
	}

	reg := prometheus.DefaultRegisterer
@@ -58,7 +79,7 @@ func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
		Help: "Latest blockchain height the L1 message fetcher has synced with.",
	})

	return c
	return c, nil
}

// Start starts the L1 message fetching process.
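For readers skimming the constructor diff above, the following is a small illustrative sketch (not part of the change) of the fallback wiring it performs: blob providers are added to an aggregate client in priority order, and construction must fail when none is configured. The BlobScan endpoint value is taken from the example config earlier in this diff; everything else is hypothetical.

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
)

func main() {
	// Aggregate client; providers are queried in the order they are added.
	blobClients := blob_client.NewBlobClients()

	// Only the BlobScan client is wired here; the real constructor also adds a
	// beacon-node client and a BlockNative client when their endpoints are set.
	blobClients.AddBlobClient(blob_client.NewBlobScanClient("https://api.blobscan.com/blobs/"))

	// Mirror of the fail-fast check: without a provider there is no way to
	// recover blob data for CodecV7+ batches.
	if blobClients.Size() == 0 {
		fmt.Println("no blob client is configured")
		return
	}
	fmt.Println("blob clients configured:", blobClients.Size())
}
```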
@@ -2,13 +2,16 @@ package logic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
"scroll-tech/bridge-history-api/internal/config"
|
||||
@@ -19,15 +22,17 @@ import (
|
||||
|
||||
// L1EventParser the l1 event parser
|
||||
type L1EventParser struct {
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
cfg *config.FetcherConfig
|
||||
client *ethclient.Client
|
||||
blobClient blob_client.BlobClient
|
||||
}
|
||||
|
||||
// NewL1EventParser creates l1 event parser
|
||||
func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1EventParser {
|
||||
func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client, blobClient blob_client.BlobClient) *L1EventParser {
|
||||
return &L1EventParser{
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
cfg: cfg,
|
||||
client: client,
|
||||
blobClient: blobClient,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -232,7 +237,21 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
|
||||
}
|
||||
|
||||
// ParseL1BatchEventLogs parses L1 watched batch events.
|
||||
func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client) ([]*orm.BatchEvent, error) {
|
||||
func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client, blockTimestampsMap map[uint64]uint64) ([]*orm.BatchEvent, error) {
|
||||
// Since multiple CommitBatch events per transaction is introduced >= CodecV7,
|
||||
// with one transaction carrying multiple blobs,
|
||||
// each CommitBatch event corresponds to a blob containing block range data.
|
||||
// To correctly process these events, we need to:
|
||||
// 1. Parsing the associated blob data to extract the block range for each event
|
||||
// 2. Tracking the parent batch hash for each processed CommitBatch event, to:
|
||||
// - Validate the batch hash, since parent batch hash is needed to calculate the batch hash
|
||||
// - Derive the index of the current batch by the number of parent batch hashes tracked
|
||||
// In commitBatches and commitAndFinalizeBatch, the parent batch hash is passed in calldata,
|
||||
// so that we can use it to get the first batch's parent batch hash, and derive the rest.
|
||||
// The index map serves this purpose with:
|
||||
// Key: commit transaction hash
|
||||
// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
|
||||
txBlobIndexMap := make(map[common.Hash][]common.Hash)
|
||||
var l1BatchEvents []*orm.BatchEvent
|
||||
for _, vlog := range logs {
|
||||
switch vlog.Topics[0] {
|
||||
@@ -247,11 +266,59 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
|
||||
return nil, err
|
||||
}
|
||||
startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
|
||||
version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
|
||||
if err != nil {
|
||||
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
|
||||
return nil, err
|
||||
}
|
||||
if version >= 7 { // It's a batch with version >= 7.
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(version))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unsupported codec version: %v, err: %w", version, err)
|
||||
}
|
||||
|
||||
// we append the batch hash to the slice for the current commit transaction after processing the batch.
|
||||
// that means the current index of the batch within the transaction is len(txBlobIndexMap[vlog.TxHash]).
|
||||
currentIndex := len(txBlobIndexMap[vlog.TxHash])
|
||||
if currentIndex >= len(commitTx.BlobHashes()) {
|
||||
return nil, fmt.Errorf("commit transaction %s has %d blobs, but trying to access index %d (batch index %d)",
|
||||
vlog.TxHash.String(), len(commitTx.BlobHashes()), currentIndex, event.BatchIndex.Uint64())
|
||||
}
|
||||
blobVersionedHash := commitTx.BlobHashes()[currentIndex]
|
||||
|
||||
// validate the batch hash
|
||||
var parentBatchHash common.Hash
|
||||
if currentIndex == 0 {
|
||||
parentBatchHash, err = utils.GetParentBatchHashFromCalldata(commitTx.Data())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get parent batch header from calldata, tx hash: %s, err: %w", vlog.TxHash.String(), err)
|
||||
}
|
||||
} else {
|
||||
// here we need to subtract 1 from the current index to get the parent batch hash.
|
||||
parentBatchHash = txBlobIndexMap[vlog.TxHash][currentIndex-1]
|
||||
}
|
||||
calculatedBatch, err := codec.NewDABatchFromParams(event.BatchIndex.Uint64(), blobVersionedHash, parentBatchHash)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", event.BatchIndex.Uint64(), err)
|
||||
}
|
||||
if calculatedBatch.Hash() != event.BatchHash {
|
||||
return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
|
||||
}
|
||||
|
||||
blocks, err := e.getBatchBlockRangeFromBlob(ctx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
|
||||
blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
|
||||
}
|
||||
if len(blocks) == 0 {
|
||||
return nil, fmt.Errorf("no blocks found in the blob, blobVersionedHash: %s, block number: %d, blob index: %d",
|
||||
blobVersionedHash.String(), vlog.BlockNumber, currentIndex)
|
||||
}
|
||||
startBlock = blocks[0].Number()
|
||||
endBlock = blocks[len(blocks)-1].Number()
|
||||
|
||||
txBlobIndexMap[vlog.TxHash] = append(txBlobIndexMap[vlog.TxHash], event.BatchHash)
|
||||
}
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
BatchStatus: int(btypes.BatchStatusTypeCommitted),
|
||||
BatchIndex: event.BatchIndex.Uint64(),
|
||||
@@ -260,8 +327,8 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
EndBlockNumber: endBlock,
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
})
|
||||
case backendabi.L1RevertBatchEventSig:
|
||||
event := backendabi.L1RevertBatchEvent{}
|
||||
case backendabi.L1RevertBatchV0EventSig:
|
||||
event := backendabi.L1RevertBatchV0Event{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
|
||||
log.Error("Failed to unpack RevertBatch event", "err", err)
|
||||
return nil, err
|
||||
@@ -272,6 +339,19 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
|
||||
BatchHash: event.BatchHash.String(),
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
})
|
||||
case backendabi.L1RevertBatchV7EventSig:
|
||||
event := backendabi.L1RevertBatchV7Event{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch0", vlog); err != nil {
|
||||
log.Error("Failed to unpack RevertBatch event", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
for i := event.StartBatchIndex.Uint64(); i <= event.FinishBatchIndex.Uint64(); i++ {
|
||||
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
|
||||
BatchStatus: int(btypes.BatchStatusTypeReverted),
|
||||
BatchIndex: i,
|
||||
L1BlockNumber: vlog.BlockNumber,
|
||||
})
|
||||
}
|
||||
case backendabi.L1FinalizeBatchEventSig:
|
||||
event := backendabi.L1FinalizeBatchEvent{}
|
||||
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
|
||||
@@ -389,3 +469,27 @@ func getRealFromAddress(ctx context.Context, eventSender common.Address, eventMe
|
||||
}
|
||||
return sender.String(), nil
|
||||
}
|
||||
|
||||
func (e *L1EventParser) getBatchBlockRangeFromBlob(ctx context.Context, codec encoding.Codec, blobVersionedHash common.Hash, l1BlockTime uint64) ([]encoding.DABlock, error) {
|
||||
blob, err := e.blobClient.GetBlobByVersionedHashAndBlockTime(ctx, blobVersionedHash, l1BlockTime)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get blob %s: %w", blobVersionedHash.Hex(), err)
|
||||
}
|
||||
if blob == nil {
|
||||
return nil, fmt.Errorf("blob %s not found", blobVersionedHash.Hex())
|
||||
}
|
||||
|
||||
blobPayload, err := codec.DecodeBlob(blob)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("blob %s decode error: %w", blobVersionedHash.Hex(), err)
|
||||
}
|
||||
|
||||
blocks := blobPayload.Blocks()
|
||||
if len(blocks) == 0 {
|
||||
return nil, fmt.Errorf("empty blocks in blob %s", blobVersionedHash.Hex())
|
||||
}
|
||||
|
||||
log.Debug("Successfully processed blob", "blobVersionedHash", blobVersionedHash.Hex(), "blocksCount", len(blocks))
|
||||
|
||||
return blocks, nil
|
||||
}
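Read end to end, the new helper boils down to three steps: fetch the blob by its versioned hash (the L1 block timestamp lets the blob client locate the right beacon slot), decode it with the batch's codec, and take the first and last decoded block numbers as the batch's block range. A hedged sketch using only calls that appear in the parser above; exact signatures may differ slightly in the real packages.

```go
import (
	"context"
	"fmt"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
)

// batchRangeFromBlob returns the first and last L2 block numbers carried by a
// committed blob; it mirrors getBatchBlockRangeFromBlob and is for illustration only.
func batchRangeFromBlob(ctx context.Context, codec encoding.Codec, client blob_client.BlobClient,
	versionedHash common.Hash, l1BlockTime uint64) (uint64, uint64, error) {
	blob, err := client.GetBlobByVersionedHashAndBlockTime(ctx, versionedHash, l1BlockTime)
	if err != nil {
		return 0, 0, fmt.Errorf("get blob %s: %w", versionedHash.Hex(), err)
	}
	payload, err := codec.DecodeBlob(blob)
	if err != nil {
		return 0, 0, fmt.Errorf("decode blob %s: %w", versionedHash.Hex(), err)
	}
	blocks := payload.Blocks()
	if len(blocks) == 0 {
		return 0, 0, fmt.Errorf("blob %s contains no blocks", versionedHash.Hex())
	}
	return blocks[0].Number(), blocks[len(blocks)-1].Number(), nil
}
```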
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
|
||||
"gorm.io/gorm"
|
||||
|
||||
backendabi "scroll-tech/bridge-history-api/abi"
|
||||
@@ -49,7 +50,7 @@ type L1FetcherLogic struct {
|
||||
}
|
||||
|
||||
// NewL1FetcherLogic creates L1 fetcher logic
|
||||
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
|
||||
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client, blobClient blob_client.BlobClient) *L1FetcherLogic {
|
||||
addressList := []common.Address{
|
||||
common.HexToAddress(cfg.StandardERC20GatewayAddr),
|
||||
common.HexToAddress(cfg.CustomERC20GatewayAddr),
|
||||
@@ -119,6 +120,10 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
|
||||
gatewayList = append(gatewayList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
|
||||
}
|
||||
|
||||
if common.HexToAddress(cfg.MessageQueueV2Addr) != (common.Address{}) {
|
||||
addressList = append(addressList, common.HexToAddress(cfg.MessageQueueV2Addr))
|
||||
}
|
||||
|
||||
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
|
||||
|
||||
f := &L1FetcherLogic{
|
||||
@@ -129,7 +134,7 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
|
||||
client: client,
|
||||
addressList: addressList,
|
||||
gatewayList: gatewayList,
|
||||
parser: NewL1EventParser(cfg, client),
|
||||
parser: NewL1EventParser(cfg, client, blobClient),
|
||||
}
|
||||
|
||||
reg := prometheus.DefaultRegisterer
|
||||
@@ -168,14 +173,10 @@ func (f *L1FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to u
|
||||
return false, 0, lastBlockHash, blocks, nil
|
||||
}
|
||||
|
||||
func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, error) {
|
||||
func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) ([]*orm.CrossMessage, error) {
|
||||
var l1RevertedTxs []*orm.CrossMessage
|
||||
blockTimestampsMap := make(map[uint64]uint64)
|
||||
|
||||
for i := from; i <= to; i++ {
|
||||
block := blocks[i-from]
|
||||
blockTimestampsMap[block.NumberU64()] = block.Time()
|
||||
|
||||
for _, tx := range block.Transactions() {
|
||||
// Gateways: L1 deposit.
|
||||
// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
|
||||
@@ -187,7 +188,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
|
||||
if receiptErr != nil {
|
||||
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
|
||||
return nil, nil, receiptErr
|
||||
return nil, receiptErr
|
||||
}
|
||||
|
||||
// Check if the transaction is failed
|
||||
@@ -199,7 +200,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
sender, senderErr := signer.Sender(tx)
|
||||
if senderErr != nil {
|
||||
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", senderErr)
|
||||
return nil, nil, senderErr
|
||||
return nil, senderErr
|
||||
}
|
||||
|
||||
l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
|
||||
@@ -213,7 +214,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
|
||||
})
|
||||
}
|
||||
}
|
||||
return blockTimestampsMap, l1RevertedTxs, nil
|
||||
return l1RevertedTxs, nil
|
||||
}
|
||||
|
||||
func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
|
||||
@@ -224,7 +225,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
|
||||
Topics: make([][]common.Hash, 1),
|
||||
}
|
||||
|
||||
query.Topics[0] = make([]common.Hash, 16)
|
||||
query.Topics[0] = make([]common.Hash, 17)
|
||||
query.Topics[0][0] = backendabi.L1DepositETHSig
|
||||
query.Topics[0][1] = backendabi.L1DepositERC20Sig
|
||||
query.Topics[0][2] = backendabi.L1DepositERC721Sig
|
||||
@@ -233,14 +234,15 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
|
||||
query.Topics[0][5] = backendabi.L1RelayedMessageEventSig
|
||||
query.Topics[0][6] = backendabi.L1FailedRelayedMessageEventSig
|
||||
query.Topics[0][7] = backendabi.L1CommitBatchEventSig
|
||||
query.Topics[0][8] = backendabi.L1RevertBatchEventSig
|
||||
query.Topics[0][9] = backendabi.L1FinalizeBatchEventSig
|
||||
query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
|
||||
query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
|
||||
query.Topics[0][12] = backendabi.L1DropTransactionEventSig
|
||||
query.Topics[0][13] = backendabi.L1ResetDequeuedTransactionEventSig
|
||||
query.Topics[0][14] = backendabi.L1BridgeBatchDepositSig
|
||||
query.Topics[0][15] = backendabi.L1DepositWrappedTokenSig
|
||||
query.Topics[0][8] = backendabi.L1RevertBatchV0EventSig
|
||||
query.Topics[0][9] = backendabi.L1RevertBatchV7EventSig
|
||||
query.Topics[0][10] = backendabi.L1FinalizeBatchEventSig
|
||||
query.Topics[0][11] = backendabi.L1QueueTransactionEventSig
|
||||
query.Topics[0][12] = backendabi.L1DequeueTransactionEventSig
|
||||
query.Topics[0][13] = backendabi.L1DropTransactionEventSig
|
||||
query.Topics[0][14] = backendabi.L1ResetDequeuedTransactionEventSig
|
||||
query.Topics[0][15] = backendabi.L1BridgeBatchDepositSig
|
||||
query.Topics[0][16] = backendabi.L1DepositWrappedTokenSig
|
||||
|
||||
eventLogs, err := f.client.FilterLogs(ctx, query)
|
||||
if err != nil {
|
||||
@@ -264,12 +266,18 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
|
||||
return isReorg, reorgHeight, blockHash, nil, nil
|
||||
}
|
||||
|
||||
blockTimestampsMap, l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
|
||||
l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
|
||||
if err != nil {
|
||||
log.Error("L1Fetcher getRevertedTxs failed", "from", from, "to", to, "error", err)
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
// Map block number to block timestamp to avoid fetching block header multiple times to get block timestamp.
|
||||
blockTimestampsMap := make(map[uint64]uint64)
|
||||
for _, block := range blocks {
|
||||
blockTimestampsMap[block.NumberU64()] = block.Time()
|
||||
}
|
||||
|
||||
eventLogs, err := f.l1FetcherLogs(ctx, from, to)
|
||||
if err != nil {
|
||||
log.Error("L1Fetcher l1FetcherLogs failed", "from", from, "to", to, "error", err)
|
||||
@@ -282,7 +290,7 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
|
||||
return false, 0, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client)
|
||||
l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client, blockTimestampsMap)
|
||||
if err != nil {
|
||||
log.Error("failed to parse L1 batch event logs", "from", from, "to", to, "err", err)
|
||||
return false, 0, common.Hash{}, nil, err
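One detail worth calling out from the fetcher changes above: eth_getLogs treats the hashes inside Topics[0] as alternatives, which is why splitting RevertBatch into V0 and V7 signatures only grows the topic list from 16 to 17 entries instead of requiring a second query. A hedged sketch of that OR behaviour, reusing the signature names from the fetcher; the function itself and the assumption that the query is an ethereum.FilterQuery are illustrative.

```go
import (
	"context"
	"math/big"

	"github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethclient"

	backendabi "scroll-tech/bridge-history-api/abi"
)

// fetchRevertLogs matches either revert-event signature in a single log query.
func fetchRevertLogs(ctx context.Context, client *ethclient.Client, addresses []common.Address, from, to uint64) ([]types.Log, error) {
	query := ethereum.FilterQuery{
		FromBlock: new(big.Int).SetUint64(from),
		ToBlock:   new(big.Int).SetUint64(to),
		Addresses: addresses,
		Topics: [][]common.Hash{{
			backendabi.L1RevertBatchV0EventSig,
			backendabi.L1RevertBatchV7EventSig,
		}},
	}
	return client.FilterLogs(ctx, query)
}
```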
|
||||
|
||||
@@ -2,6 +2,7 @@ package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@@ -45,7 +46,7 @@ func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64,
|
||||
db = db.Model(&BatchEvent{})
|
||||
db = db.Order("l1_block_number desc")
|
||||
if err := db.First(&batch).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("failed to get batch synced height in db, error: %w", err)
|
||||
@@ -62,7 +63,7 @@ func (c *BatchEvent) GetLastUpdatedFinalizedBlockHeight(ctx context.Context) (ui
|
||||
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUpdated)
|
||||
db = db.Order("batch_index desc")
|
||||
if err := db.First(&batch).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
// No finalized batch found, return genesis batch's end block number.
|
||||
return 0, nil
|
||||
}
|
||||
@@ -81,7 +82,7 @@ func (c *BatchEvent) GetUnupdatedFinalizedBatchesLEBlockHeight(ctx context.Conte
|
||||
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUnupdated)
|
||||
db = db.Order("batch_index asc")
|
||||
if err := db.Find(&batches).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get unupdated finalized batches >= block height, error: %w", err)
|
||||
@@ -116,7 +117,7 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
|
||||
}
|
||||
case btypes.BatchStatusTypeReverted:
|
||||
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
|
||||
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
|
||||
db = db.Where("batch_status != ?", btypes.BatchStatusTypeFinalized)
|
||||
updateFields["batch_status"] = btypes.BatchStatusTypeReverted
|
||||
if err := db.Updates(updateFields).Error; err != nil {
|
||||
return fmt.Errorf("failed to update batch event, error: %w", err)
|
||||
|
||||
@@ -2,6 +2,7 @@ package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@@ -84,7 +85,7 @@ func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageTy
|
||||
db = db.Order("l2_block_number desc")
|
||||
}
|
||||
if err := db.First(&message).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
|
||||
@@ -108,7 +109,7 @@ func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*Cro
|
||||
db = db.Where("rollup_status = ?", btypes.RollupStatusTypeFinalized)
|
||||
db = db.Order("message_nonce desc")
|
||||
if err := db.First(&message).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
|
||||
@@ -127,10 +128,10 @@ func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBl
|
||||
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
|
||||
db = db.Order("message_nonce asc")
|
||||
if err := db.Find(&messages).Error; err != nil {
|
||||
if err == gorm.ErrRecordNotFound {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
|
||||
return nil, fmt.Errorf("failed to get L2 withdrawals by block range, error: %v", err)
|
||||
}
|
||||
return messages, nil
|
||||
}
|
||||
@@ -153,7 +154,7 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
|
||||
db := c.db.WithContext(ctx)
|
||||
db = db.Model(&CrossMessage{})
|
||||
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
|
||||
db = db.Where("tx_status = ?", types.TxStatusTypeSent)
|
||||
db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
|
||||
db = db.Where("sender = ?", sender)
|
||||
db = db.Order("block_timestamp desc")
|
||||
db = db.Limit(500)
|
||||
|
||||
@@ -66,25 +66,26 @@ func ComputeMessageHash(
|
||||
return common.BytesToHash(crypto.Keccak256(data))
|
||||
}
|
||||
|
||||
// GetBatchRangeFromCalldata find the block range from calldata, both inclusive.
|
||||
func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
|
||||
// GetBatchVersionAndBlockRangeFromCalldata finds the batch version and the block range from calldata, both inclusive.
|
||||
func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uint64, error) {
|
||||
const methodIDLength = 4
|
||||
if len(txData) < methodIDLength {
|
||||
return 0, 0, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
|
||||
return 0, 0, 0, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
|
||||
}
|
||||
method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
|
||||
return 0, 0, 0, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
|
||||
}
|
||||
values, err := method.Inputs.Unpack(txData[methodIDLength:])
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
|
||||
return 0, 0, 0, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
|
||||
}
|
||||
|
||||
var chunks [][]byte
|
||||
var version uint8
|
||||
|
||||
if method.Name == "importGenesisBatch" {
|
||||
return 0, 0, nil
|
||||
return 0, 0, 0, nil
|
||||
} else if method.Name == "commitBatch" {
|
||||
type commitBatchArgs struct {
|
||||
Version uint8
|
||||
@@ -95,11 +96,11 @@ func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
|
||||
|
||||
var args commitBatchArgs
|
||||
if err = method.Inputs.Copy(&args, values); err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
|
||||
return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
|
||||
}
|
||||
|
||||
chunks = args.Chunks
|
||||
|
||||
version = args.Version
|
||||
} else if method.Name == "commitBatchWithBlobProof" {
|
||||
type commitBatchWithBlobProofArgs struct {
|
||||
Version uint8
|
||||
@@ -111,10 +112,22 @@ func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
|
||||
|
||||
var args commitBatchWithBlobProofArgs
|
||||
if err = method.Inputs.Copy(&args, values); err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
|
||||
return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
|
||||
}
|
||||
|
||||
chunks = args.Chunks
|
||||
version = args.Version
|
||||
} else if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
|
||||
if len(values) < 3 {
|
||||
return 0, 0, 0, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
|
||||
}
|
||||
|
||||
var ok bool
|
||||
version, ok = values[0].(uint8)
|
||||
if !ok {
|
||||
return 0, 0, 0, fmt.Errorf("invalid version type: %T", values[0])
|
||||
}
|
||||
return version, 0, 0, nil
|
||||
}
|
||||
|
||||
var startBlock uint64
|
||||
@@ -124,7 +137,7 @@ func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
|
||||
// | 1 byte | 60 bytes | ... | 60 bytes |
|
||||
// | num blocks | block 1 | ... | block n |
|
||||
if len(chunks) == 0 {
|
||||
return 0, 0, errors.New("invalid chunks")
|
||||
return 0, 0, 0, errors.New("invalid chunks")
|
||||
}
|
||||
chunk := chunks[0]
|
||||
block := chunk[1:61] // first block in chunk
|
||||
@@ -135,7 +148,36 @@ func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
|
||||
block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
|
||||
finishBlock = binary.BigEndian.Uint64(block[0:8])
|
||||
|
||||
return startBlock, finishBlock, err
|
||||
return version, startBlock, finishBlock, err
|
||||
}
|
||||
|
||||
// GetParentBatchHashFromCalldata gets the parent batch hash from calldata.
|
||||
// It only supports commitBatches and commitAndFinalizeBatch, which only accept batches >= v7.
|
||||
func GetParentBatchHashFromCalldata(txData []byte) (common.Hash, error) {
|
||||
const methodIDLength = 4
|
||||
if len(txData) < methodIDLength {
|
||||
return common.Hash{}, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
|
||||
}
|
||||
method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
|
||||
}
|
||||
values, err := method.Inputs.Unpack(txData[methodIDLength:])
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
|
||||
}
|
||||
|
||||
if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
|
||||
if len(values) < 3 {
|
||||
return common.Hash{}, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
|
||||
}
|
||||
parentBatchHash, ok := values[1].([32]byte)
|
||||
if !ok {
|
||||
return common.Hash{}, fmt.Errorf("invalid parentBatchHash type: %T", values[1])
|
||||
}
|
||||
return common.BytesToHash(parentBatchHash[:]), nil
|
||||
}
|
||||
return common.Hash{}, fmt.Errorf("method %s does not support parent batch header", method.Name)
|
||||
}
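Putting the two helpers together: for a commitBatches or commitAndFinalizeBatch transaction the calldata yields only the codec version and the parent batch hash of the first batch, while the block range has to be recovered from the blob (the range helper returns zeros for those methods). A hedged usage sketch from the caller's side; the error-handling scaffolding is illustrative.

```go
version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
if err != nil {
	return fmt.Errorf("parse commit calldata: %w", err)
}
if version >= 7 {
	// commitBatches / commitAndFinalizeBatch: only the version and the first
	// batch's parent hash live in calldata; the block range comes from the blob.
	parentBatchHash, err := utils.GetParentBatchHashFromCalldata(commitTx.Data())
	if err != nil {
		return fmt.Errorf("parse parent batch hash: %w", err)
	}
	_ = parentBatchHash
} else {
	log.Info("batch block range from calldata", "start", startBlock, "end", endBlock)
}
```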
|
||||
|
||||
// GetBlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -91,7 +91,7 @@ linters-settings:
|
||||
#local-prefixes: github.com/org/project
|
||||
gocyclo:
|
||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
||||
min-complexity: 30
|
||||
min-complexity: 40
|
||||
maligned:
|
||||
# print struct with more effective memory layout or not, false by default
|
||||
suggest-new: true
|
||||
@@ -254,6 +254,9 @@ issues:
|
||||
- linters:
|
||||
- wsl
|
||||
text: "expressions should not be cuddled with declarations or returns"
|
||||
- linters:
|
||||
- govet
|
||||
text: 'shadow: declaration of "(err|ctx)" shadows declaration at'
|
||||
|
||||
# Independently from option `exclude` we use default exclude patterns,
|
||||
# it can be disabled by this option. To list all
|
||||
|
||||
30
build/dockerfiles/blob_uploader.Dockerfile
Normal file
@@ -0,0 +1,30 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./rollup/go.* ./rollup/
|
||||
COPY ./common/go.* ./common/
|
||||
COPY ./coordinator/go.* ./coordinator/
|
||||
COPY ./database/go.* ./database/
|
||||
COPY ./tests/integration-test/go.* ./tests/integration-test/
|
||||
COPY ./bridge-history-api/go.* ./bridge-history-api/
|
||||
RUN go mod download -x
|
||||
|
||||
# Build blob_uploader
|
||||
FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/rollup/cmd/blob_uploader/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/blob_uploader
|
||||
|
||||
# Pull blob_uploader into a second stage deploy ubuntu container
|
||||
FROM ubuntu:20.04
|
||||
|
||||
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
|
||||
|
||||
ENV CGO_LDFLAGS="-ldl"
|
||||
|
||||
COPY --from=builder /bin/blob_uploader /bin/
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["blob_uploader"]
|
||||
5
build/dockerfiles/blob_uploader.Dockerfile.dockerignore
Normal file
@@ -0,0 +1,5 @@
|
||||
assets/
|
||||
docs/
|
||||
l2geth/
|
||||
rpc-gateway/
|
||||
*target/*
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
|
||||
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.mod* ./
|
||||
@@ -17,7 +17,7 @@ RUN --mount=target=. \
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
|
||||
|
||||
RUN apt update && apt install vim netcat-openbsd net-tools curl -y
|
||||
COPY --from=builder /bin/bridgehistoryapi-api /bin/
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["bridgehistoryapi-api"]
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM golang:1.21-alpine3.19 as base
|
||||
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY ./bridge-history-api/go.* ./
|
||||
@@ -10,10 +10,11 @@ FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/bridge-history-api/cmd/db_cli && go build -v -p 4 -o /bin/db_cli
|
||||
cd /src/bridge-history-api/cmd/db_cli && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli
|
||||
|
||||
# Pull db_cli into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
# Pull db_cli into a second stage deploy ubuntu container
|
||||
FROM ubuntu:20.04
|
||||
ENV CGO_LDFLAGS="-ldl"
|
||||
COPY --from=builder /bin/db_cli /bin/
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["db_cli"]
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
|
||||
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.mod* ./
|
||||
@@ -17,7 +17,8 @@ RUN --mount=target=. \
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
|
||||
|
||||
RUN apt update && apt install ca-certificates vim netcat-openbsd net-tools curl -y
|
||||
RUN update-ca-certificates
|
||||
COPY --from=builder /bin/bridgehistoryapi-fetcher /bin/
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["bridgehistoryapi-fetcher"]
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Build libzkp dependency
|
||||
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
|
||||
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03 as chef
|
||||
WORKDIR app
|
||||
|
||||
FROM chef as planner
|
||||
@@ -9,6 +9,12 @@ RUN cargo chef prepare --recipe-path recipe.json
|
||||
FROM chef as zkp-builder
|
||||
COPY ./common/libzkp/impl/rust-toolchain ./
|
||||
COPY --from=planner /app/recipe.json recipe.json
|
||||
# run scripts to get openvm-gpu
|
||||
COPY ./build/dockerfiles/coordinator-api/plonky3-gpu /plonky3-gpu
|
||||
COPY ./build/dockerfiles/coordinator-api/openvm-stark-gpu /openvm-stark-gpu
|
||||
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
|
||||
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
|
||||
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
|
||||
RUN cargo chef cook --release --recipe-path recipe.json
|
||||
|
||||
COPY ./common/libzkp/impl .
|
||||
@@ -16,7 +22,7 @@ RUN cargo build --release
|
||||
|
||||
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
|
||||
FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03 as base
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./rollup/go.* ./rollup/
|
||||
@@ -36,10 +42,11 @@ COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/log
|
||||
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
|
||||
|
||||
# Pull coordinator into a second stage deploy ubuntu container
|
||||
FROM ubuntu:20.04
|
||||
FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04
|
||||
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
|
||||
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
|
||||
# ENV CHAIN_ID=534353
|
||||
RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
|
||||
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
|
||||
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
|
||||
COPY --from=builder /bin/coordinator_api /bin/
|
||||
|
||||
17
build/dockerfiles/coordinator-api/checkout_all.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
set -uex
|
||||
|
||||
PLONKY3_GPU_COMMIT=261b322 # v0.2.0
|
||||
OPENVM_STARK_GPU_COMMIT=3082234 # PR#48
|
||||
OPENVM_GPU_COMMIT=8094b4f # branch: patch-v1.2.0
|
||||
|
||||
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
|
||||
|
||||
# checkout plonky3-gpu
|
||||
cd $DIR/plonky3-gpu && git checkout ${PLONKY3_GPU_COMMIT}
|
||||
|
||||
# checkout openvm-stark-gpu
|
||||
cd $DIR/openvm-stark-gpu && git checkout ${OPENVM_STARK_GPU_COMMIT}
|
||||
|
||||
# checkout openvm-gpu
|
||||
cd $DIR/openvm-gpu && git checkout ${OPENVM_GPU_COMMIT}
|
||||
10
build/dockerfiles/coordinator-api/clone_openvm_gpu.sh
Executable file
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
set -uex
|
||||
|
||||
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
|
||||
|
||||
# clone openvm-gpu if not exists
|
||||
if [ ! -d $DIR/openvm-gpu ]; then
|
||||
git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
|
||||
fi
|
||||
cd $DIR/openvm-gpu && git fetch --all --force
|
||||
10
build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh
Executable file
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
set -uex
|
||||
|
||||
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
|
||||
|
||||
# clone openvm-stark-gpu if not exists
|
||||
if [ ! -d $DIR/openvm-stark-gpu ]; then
|
||||
git clone git@github.com:scroll-tech/openvm-stark-gpu.git $DIR/openvm-stark-gpu
|
||||
fi
|
||||
cd $DIR/openvm-stark-gpu && git fetch --all --force
|
||||
10
build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh
Executable file
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
set -uex
|
||||
|
||||
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
|
||||
|
||||
# clone plonky3-gpu if not exists
|
||||
if [ ! -d $DIR/plonky3-gpu ]; then
|
||||
git clone git@github.com:scroll-tech/plonky3-gpu.git $DIR/plonky3-gpu
|
||||
fi
|
||||
cd $DIR/plonky3-gpu && git fetch --all --force
|
||||
92
build/dockerfiles/coordinator-api/config.toml
Normal file
@@ -0,0 +1,92 @@
|
||||
# openvm
|
||||
# same order and features as zkvm-prover/Cargo.toml.gpu
|
||||
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
|
||||
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
|
||||
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
|
||||
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
|
||||
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
|
||||
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
|
||||
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
|
||||
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
|
||||
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
|
||||
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
|
||||
openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
|
||||
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
|
||||
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
|
||||
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
|
||||
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
|
||||
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
|
||||
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
|
||||
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
|
||||
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
|
||||
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
|
||||
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
|
||||
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }
|
||||
|
||||
# stark-backend
|
||||
[patch."https://github.com/openvm-org/stark-backend.git"]
|
||||
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
|
||||
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
|
||||
|
||||
[patch."ssh://git@github.com/scroll-tech/openvm-stark-gpu.git"]
|
||||
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
|
||||
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }
|
||||
|
||||
# plonky3
|
||||
[patch."https://github.com/Plonky3/Plonky3.git"]
|
||||
p3-air = { path = "/plonky3-gpu/air" }
|
||||
p3-field = { path = "/plonky3-gpu/field" }
|
||||
p3-commit = { path = "/plonky3-gpu/commit" }
|
||||
p3-matrix = { path = "/plonky3-gpu/matrix" }
|
||||
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
|
||||
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
|
||||
p3-util = { path = "/plonky3-gpu/util" }
|
||||
p3-challenger = { path = "/plonky3-gpu/challenger" }
|
||||
p3-dft = { path = "/plonky3-gpu/dft" }
|
||||
p3-fri = { path = "/plonky3-gpu/fri" }
|
||||
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
|
||||
p3-keccak = { path = "/plonky3-gpu/keccak" }
|
||||
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
|
||||
p3-blake3 = { path = "/plonky3-gpu/blake3" }
|
||||
p3-mds = { path = "/plonky3-gpu/mds" }
|
||||
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
|
||||
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
|
||||
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
|
||||
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
|
||||
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
|
||||
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
|
||||
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
|
||||
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
|
||||
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }
|
||||
|
||||
# gpu crates
|
||||
[patch."ssh://git@github.com/scroll-tech/plonky3-gpu.git"]
|
||||
p3-gpu-base = { path = "/plonky3-gpu/gpu-base" }
|
||||
p3-gpu-build = { path = "/plonky3-gpu/gpu-build" }
|
||||
p3-gpu-field = { path = "/plonky3-gpu/gpu-field" }
|
||||
p3-gpu-backend = { path = "/plonky3-gpu/gpu-backend" }
|
||||
p3-gpu-module = { path = "/plonky3-gpu/gpu-module" }
|
||||
p3-air = { path = "/plonky3-gpu/air" }
|
||||
p3-field = { path = "/plonky3-gpu/field" }
|
||||
p3-commit = { path = "/plonky3-gpu/commit" }
|
||||
p3-matrix = { path = "/plonky3-gpu/matrix" }
|
||||
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
|
||||
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
|
||||
p3-util = { path = "/plonky3-gpu/util" }
|
||||
p3-challenger = { path = "/plonky3-gpu/challenger" }
|
||||
p3-dft = { path = "/plonky3-gpu/dft" }
|
||||
p3-fri = { path = "/plonky3-gpu/fri" }
|
||||
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
|
||||
p3-keccak = { path = "/plonky3-gpu/keccak" }
|
||||
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
|
||||
p3-blake3 = { path = "/plonky3-gpu/blake3" }
|
||||
p3-mds = { path = "/plonky3-gpu/mds" }
|
||||
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
|
||||
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
|
||||
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
|
||||
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
|
||||
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
|
||||
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
|
||||
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
|
||||
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
|
||||
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }
|
||||
2
build/dockerfiles/coordinator-api/gitconfig
Normal file
@@ -0,0 +1,2 @@
|
||||
[url "https://github.com/"]
|
||||
insteadOf = ssh://git@github.com/
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
|
||||
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
@@ -19,9 +19,8 @@ RUN --mount=target=. \
|
||||
|
||||
# Pull coordinator into a second stage deploy ubuntu container
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
|
||||
|
||||
RUN apt update && apt install vim netcat-openbsd net-tools curl -y
|
||||
COPY --from=builder /bin/coordinator_cron /bin/
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["coordinator_cron"]
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.21 as base
|
||||
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
@@ -16,10 +16,11 @@ FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/database/cmd && go build -v -p 4 -o /bin/db_cli
|
||||
cd /src/database/cmd && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli
|
||||
|
||||
# Pull db_cli into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
# Pull db_cli into a second stage deploy ubuntu container
|
||||
FROM ubuntu:20.04
|
||||
ENV CGO_LDFLAGS="-ldl"
|
||||
COPY --from=builder /bin/db_cli /bin/
|
||||
WORKDIR /app
|
||||
ENTRYPOINT ["db_cli"]
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
|
||||
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
@@ -21,7 +21,7 @@ RUN --mount=target=. \
|
||||
# Pull gas_oracle into a second stage deploy ubuntu container
|
||||
FROM ubuntu:20.04
|
||||
|
||||
RUN apt update && apt install ca-certificates -y
|
||||
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
|
||||
|
||||
ENV CGO_LDFLAGS="-ldl"
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
ARG CUDA_VERSION=11.7.1
|
||||
ARG GO_VERSION=1.21
|
||||
ARG GO_VERSION=1.22.12
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
@@ -36,7 +36,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
|
||||
else \
|
||||
echo "Unsupported architecture"; exit 1; \
|
||||
fi
|
||||
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
|
||||
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
|
||||
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
|
||||
RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
|
||||
RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
|
||||
RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
|
||||
ENV PATH="/usr/local/go/bin:${PATH}"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG GO_VERSION=1.21
|
||||
ARG GO_VERSION=1.22.12
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
@@ -32,7 +32,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
|
||||
else \
|
||||
echo "Unsupported architecture"; exit 1; \
|
||||
fi
|
||||
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
|
||||
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
|
||||
RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
|
||||
RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
|
||||
RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
|
||||
RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
|
||||
ENV PATH="/usr/local/go/bin:${PATH}"
|
||||
|
||||
23
build/dockerfiles/prover.Dockerfile
Normal file
@@ -0,0 +1,23 @@
|
||||
FROM ubuntu:24.04 AS builder
|
||||
|
||||
RUN apt-get update -y && apt-get upgrade -y
|
||||
|
||||
# Install basic packages
|
||||
RUN apt-get install build-essential curl wget git pkg-config -y
|
||||
# Install dev-packages
|
||||
RUN apt-get install libclang-dev libssl-dev llvm -y
|
||||
|
||||
# Install Rust
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
ENV CARGO_HOME=/root/.cargo
|
||||
|
||||
COPY . /src
|
||||
|
||||
RUN cd /src/zkvm-prover && make prover
|
||||
|
||||
FROM ubuntu:24.04 AS runtime
|
||||
|
||||
COPY --from=builder /src/zkvm-prover/target/release/prover /usr/local/bin/
|
||||
|
||||
ENTRYPOINT ["prover"]
|
||||
5
build/dockerfiles/prover.dockerignore
Normal file
@@ -0,0 +1,5 @@
|
||||
assets/
|
||||
docs/
|
||||
l2geth/
|
||||
rpc-gateway/
|
||||
*target/*
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
|
||||
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
@@ -21,7 +21,7 @@ RUN --mount=target=. \
|
||||
# Pull rollup_relayer into a second stage deploy ubuntu container
|
||||
FROM ubuntu:20.04
|
||||
|
||||
RUN apt update && apt install ca-certificates -y
|
||||
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
|
||||
|
||||
ENV CGO_LDFLAGS="-ldl"
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
|
||||
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
|
||||
elapsed := time.Since(begin)
|
||||
sql, rowsAffected := fc()
|
||||
g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
|
||||
g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
|
||||
}
|
||||
|
||||
// InitDB init the db handler
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
package forks
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
)
|
||||
|
||||
// GetHardforkName returns the name of the hardfork active at the given block height and timestamp.
|
||||
// It checks the chain configuration to determine which hardfork is active.
|
||||
func GetHardforkName(config *params.ChainConfig, blockHeight, blockTimestamp uint64) string {
|
||||
if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
|
||||
return "homestead"
|
||||
} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
|
||||
return "bernoulli"
|
||||
} else if !config.IsDarwin(blockTimestamp) {
|
||||
return "curie"
|
||||
} else {
|
||||
return "darwin"
|
||||
}
|
||||
}
|
||||
|
||||
// GetCodecVersion returns the encoding codec version for the given block height and timestamp.
|
||||
// It determines the appropriate codec version based on the active hardfork.
|
||||
func GetCodecVersion(config *params.ChainConfig, blockHeight, blockTimestamp uint64) encoding.CodecVersion {
|
||||
if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
|
||||
return encoding.CodecV0
|
||||
} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
|
||||
return encoding.CodecV1
|
||||
} else if !config.IsDarwin(blockTimestamp) {
|
||||
return encoding.CodecV2
|
||||
} else {
|
||||
return encoding.CodecV3
|
||||
}
|
||||
}
|
||||
|
||||
// GetMaxChunksPerBatch returns the maximum number of chunks allowed per batch for the given block height and timestamp.
|
||||
// This value may change depending on the active hardfork.
|
||||
func GetMaxChunksPerBatch(config *params.ChainConfig, blockHeight, blockTimestamp uint64) uint64 {
|
||||
if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) {
|
||||
return 15
|
||||
} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
|
||||
return 15
|
||||
} else if !config.IsDarwin(blockTimestamp) {
|
||||
return 45
|
||||
} else {
|
||||
return 45
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,12 @@
|
||||
module scroll-tech/common
|
||||
|
||||
go 1.21
|
||||
go 1.22
|
||||
|
||||
toolchain go1.22.2
|
||||
|
||||
require (
|
||||
github.com/Masterminds/semver/v3 v3.2.1
|
||||
github.com/bits-and-blooms/bitset v1.13.0
|
||||
github.com/bits-and-blooms/bitset v1.20.0
|
||||
github.com/docker/docker v26.1.0+incompatible
|
||||
github.com/gin-contrib/pprof v1.4.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
@@ -13,9 +15,8 @@ require (
|
||||
github.com/modern-go/reflect2 v1.0.2
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/prometheus/client_golang v1.19.0
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/testcontainers/testcontainers-go v0.30.0
|
||||
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
|
||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.30.0
|
||||
@@ -31,7 +32,7 @@ require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/Microsoft/hcsshim v0.11.4 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.21.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.45 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 // indirect
|
||||
@@ -54,18 +55,18 @@ require (
|
||||
github.com/chenzhuoyu/iasm v0.9.0 // indirect
|
||||
github.com/cloudflare/cfssl v1.6.5 // indirect
|
||||
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 // indirect
|
||||
github.com/consensys/bavard v0.1.13 // indirect
|
||||
github.com/consensys/gnark-crypto v0.12.1 // indirect
|
||||
github.com/consensys/bavard v0.1.29 // indirect
|
||||
github.com/consensys/gnark-crypto v0.16.0 // indirect
|
||||
github.com/containerd/console v1.0.3 // indirect
|
||||
github.com/containerd/containerd v1.7.12 // indirect
|
||||
github.com/containerd/continuity v0.4.2 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/typeurl/v2 v2.1.1 // indirect
|
||||
github.com/cpuguy83/dockercfg v0.3.1 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
|
||||
github.com/distribution/reference v0.5.0 // indirect
|
||||
github.com/docker/buildx v0.12.0-rc2.0.20231219140829-617f538cb315 // indirect
|
||||
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
|
||||
@@ -78,7 +79,7 @@ require (
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fjl/memsize v0.0.2 // indirect
|
||||
github.com/fsnotify/fsevents v0.1.1 // indirect
|
||||
@@ -118,9 +119,9 @@ require (
|
||||
github.com/hashicorp/go-version v1.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
|
||||
github.com/holiman/uint256 v1.2.4 // indirect
|
||||
github.com/holiman/uint256 v1.3.2 // indirect
|
||||
github.com/huin/goupnp v1.3.0 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.16 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/in-toto/in-toto-golang v0.5.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
@@ -135,12 +136,12 @@ require (
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/klauspost/compress v1.17.4 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/mattn/go-shellwords v1.0.12 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
|
||||
@@ -183,6 +184,7 @@ require (
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
github.com/rs/cors v1.7.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 // indirect
|
||||
github.com/scroll-tech/zktrie v0.8.4 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
|
||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
|
||||
@@ -191,16 +193,17 @@ require (
|
||||
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/spf13/cobra v1.8.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/cobra v1.8.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.4.0 // indirect
|
||||
github.com/status-im/keycard-go v0.2.0 // indirect
|
||||
github.com/supranational/blst v0.3.12 // indirect
|
||||
github.com/supranational/blst v0.3.13 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/theupdateframework/notary v0.7.0 // indirect
|
||||
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.14 // indirect
|
||||
github.com/tklauser/numcpus v0.8.0 // indirect
|
||||
github.com/tklauser/numcpus v0.9.0 // indirect
|
||||
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 // indirect
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
|
||||
github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect
|
||||
@@ -228,17 +231,19 @@ require (
|
||||
go.opentelemetry.io/otel/sdk/metric v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.24.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/mock v0.4.0 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
golang.org/x/arch v0.5.0 // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/crypto v0.32.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
|
||||
golang.org/x/mod v0.17.0 // indirect
|
||||
golang.org/x/net v0.25.0 // indirect
|
||||
golang.org/x/oauth2 v0.16.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/term v0.21.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/sync v0.11.0 // indirect
|
||||
golang.org/x/sys v0.30.0 // indirect
|
||||
golang.org/x/term v0.28.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
|
||||
108
common/go.sum
@@ -27,9 +27,11 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
@@ -70,8 +72,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
|
||||
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
|
||||
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
|
||||
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
|
||||
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
@@ -119,10 +121,10 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
|
||||
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 h1:NlkpaLBPFr05mNJWVMH7PP4L30gFG6k4z1QpypLUSh8=
|
||||
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60/go.mod h1:bEPizBkIojlQ20pi2vNluBa58tevvj0Y18oUSHPyfdc=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
|
||||
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
|
||||
github.com/consensys/bavard v0.1.29 h1:fobxIYksIQ+ZSrTJUuQgu+HIJwclrAPcdXqd7H2hh1k=
|
||||
github.com/consensys/bavard v0.1.29/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
|
||||
github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
|
||||
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
|
||||
@@ -151,10 +153,10 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
|
||||
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
|
||||
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
|
||||
@@ -163,8 +165,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
|
||||
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
@@ -212,8 +214,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
|
||||
@@ -379,13 +381,13 @@ github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u
|
||||
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
|
||||
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
|
||||
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
|
||||
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
|
||||
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
|
||||
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
|
||||
@@ -434,8 +436,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
|
||||
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
@@ -453,8 +455,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
|
||||
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
@@ -469,8 +471,9 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V
|
||||
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
|
||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
@@ -627,16 +630,16 @@ github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRr
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
|
||||
@@ -660,6 +663,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
|
||||
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spdx/tools-golang v0.5.1 h1:fJg3SVOGG+eIva9ZUBm/hvyA7PIPVFjRxUKe6fdAgwE=
|
||||
github.com/spdx/tools-golang v0.5.1/go.mod h1:/DRDQuBfB37HctM29YtrX1v+bXiVmT2OpQDalRmX9aU=
|
||||
@@ -669,8 +674,8 @@ github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IEx
|
||||
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
@@ -700,10 +705,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8=
|
||||
github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
|
||||
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
|
||||
@@ -720,8 +725,8 @@ github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0h
|
||||
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
|
||||
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
|
||||
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
|
||||
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
|
||||
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
|
||||
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
|
||||
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 h1:ZT8ibgassurSISJ1Pj26NsM3vY2jxFZn63Nd/TpHmRw=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302/go.mod h1:9kMVqMyQ/Sx2df5LtnGG+nbrmiZzCS7V6gjW3oGHsvI=
|
||||
@@ -801,11 +806,15 @@ go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
|
||||
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
|
||||
@@ -820,8 +829,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
|
||||
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
|
||||
@@ -864,8 +873,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -905,22 +914,23 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
|
||||
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
|
||||
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
|
||||
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
||||
@@ -6,6 +6,8 @@ export RUST_BACKTRACE=full
|
||||
export RUST_LOG=debug
|
||||
export RUST_MIN_STACK=100000000
|
||||
export PROVER_OUTPUT_DIR=test_zkp_test
|
||||
export SCROLL_PROVER_ASSETS_DIR=/assets/test_assets
|
||||
export DARWIN_V2_TEST_DIR=/assets
|
||||
#export LD_LIBRARY_PATH=/:/usr/local/cuda/lib64
|
||||
|
||||
mkdir -p $PROVER_OUTPUT_DIR
|
||||
@@ -13,32 +15,16 @@ mkdir -p $PROVER_OUTPUT_DIR
|
||||
REPO=$(realpath ../..)
|
||||
|
||||
function build_test_bins() {
|
||||
cd impl
|
||||
cargo build --release
|
||||
ln -f -s $(realpath target/release/libzkp.so) $REPO/prover/core/lib
|
||||
ln -f -s $(realpath target/release/libzkp.so) $REPO/coordinator/internal/logic/verifier/lib
|
||||
cd $REPO/prover
|
||||
go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
|
||||
make tests_binary
|
||||
cd $REPO/coordinator
|
||||
make libzkp
|
||||
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
|
||||
cd $REPO/common/libzkp
|
||||
}
|
||||
|
||||
function build_test_bins_old() {
|
||||
cd $REPO
|
||||
cd prover
|
||||
make libzkp
|
||||
go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
|
||||
cd ..
|
||||
cd coordinator
|
||||
make libzkp
|
||||
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
|
||||
cd ..
|
||||
cd common/libzkp
|
||||
}
|
||||
|
||||
build_test_bins
|
||||
#rm -rf test_zkp_test/*
|
||||
rm -rf $PROVER_OUTPUT_DIR/*
|
||||
#rm -rf prover.log verifier.log
|
||||
#$REPO/prover/core.test -test.v 2>&1 | tee prover.log
|
||||
$REPO/prover/prover.test --exact zk_circuits_handler::darwin_v2::tests::test_circuits 2>&1 | tee prover.log
|
||||
$REPO/coordinator/verifier.test -test.v 2>&1 | tee verifier.log
|
||||
|
||||
common/libzkp/impl/Cargo.lock (generated, 7649 lines): File diff suppressed because it is too large
@@ -8,26 +8,14 @@ edition = "2021"
|
||||
crate-type = ["cdylib"]
|
||||
|
||||
[patch.crates-io]
|
||||
gobuild = { git = "https://github.com/scroll-tech/gobuild.git" }
|
||||
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
|
||||
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
|
||||
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
|
||||
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
|
||||
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
|
||||
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
|
||||
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
|
||||
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
|
||||
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
|
||||
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
|
||||
# patched add rkyv support & MSRV 1.77
|
||||
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.21" }
|
||||
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
|
||||
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
|
||||
|
||||
[dependencies]
|
||||
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
|
||||
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
|
||||
|
||||
# curie
|
||||
prover_v3 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.4", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
|
||||
# darwin
|
||||
prover_v4 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.0", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
|
||||
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.4.2", package = "scroll-zkvm-prover" }
|
||||
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.4.2", package = "scroll-zkvm-verifier" }
|
||||
|
||||
base64 = "0.13.0"
|
||||
env_logger = "0.9.0"
|
||||
@@ -37,9 +25,42 @@ once_cell = "1.19"
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0.66"
|
||||
anyhow = "1.0.86"
|
||||
|
||||
[profile.test]
|
||||
opt-level = 3
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
|
||||
[patch."https://github.com/openvm-org/stark-backend.git"]
|
||||
openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
|
||||
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
|
||||
|
||||
[patch."https://github.com/Plonky3/Plonky3.git"]
|
||||
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
|
||||
"nightly-features",
|
||||
], tag = "v0.2.0" }
|
||||
p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
|
||||
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
|
||||
|
||||
@@ -1 +1 @@
|
||||
nightly-2023-12-03
|
||||
nightly-2024-12-06
|
||||
|
||||
@@ -1,69 +0,0 @@
|
||||
use crate::utils::{c_char_to_str, c_char_to_vec, panic_catch};
|
||||
use libc::c_char;
|
||||
use prover_v3::BatchProof as BatchProofLoVersion;
|
||||
use prover_v4::{
|
||||
aggregator::Verifier as VerifierHiVersion, utils::init_env_and_log,
|
||||
BatchProof as BatchProofHiVersion, BundleProof,
|
||||
};
|
||||
use snark_verifier_sdk::verify_evm_calldata;
|
||||
use std::{cell::OnceCell, env};
|
||||
|
||||
static mut VERIFIER: OnceCell<VerifierHiVersion> = OnceCell::new();
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_dir: *const c_char) {
|
||||
init_env_and_log("ffi_batch_verify");
|
||||
|
||||
let params_dir = c_char_to_str(params_dir);
|
||||
let assets_dir = c_char_to_str(assets_dir);
|
||||
|
||||
// TODO: add a setting in scroll-prover.
|
||||
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
|
||||
let verifier_hi = VerifierHiVersion::from_dirs(params_dir, assets_dir);
|
||||
|
||||
VERIFIER.set(verifier_hi).unwrap();
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn verify_batch_proof(
|
||||
proof: *const c_char,
|
||||
fork_name: *const c_char,
|
||||
) -> c_char {
|
||||
let proof = c_char_to_vec(proof);
|
||||
let fork_name_str = c_char_to_str(fork_name);
|
||||
let fork_id = match fork_name_str {
|
||||
"curie" => 3,
|
||||
"darwin" => 4,
|
||||
_ => {
|
||||
log::warn!("unexpected fork_name {fork_name_str}, treated as darwin");
|
||||
4
|
||||
}
|
||||
};
|
||||
let verified = panic_catch(|| {
|
||||
if fork_id == 3 {
|
||||
// As of upgrade #3 (Curie), we verify batch proofs on-chain (EVM).
|
||||
let proof = serde_json::from_slice::<BatchProofLoVersion>(proof.as_slice()).unwrap();
|
||||
verify_evm_calldata(
|
||||
include_bytes!("plonk_verifier_0.11.4.bin").to_vec(),
|
||||
proof.calldata(),
|
||||
)
|
||||
} else {
|
||||
// Post upgrade #4 (Darwin), batch proofs are not EVM-verifiable. Instead they are
|
||||
// halo2 proofs meant to be bundled recursively.
|
||||
let proof = serde_json::from_slice::<BatchProofHiVersion>(proof.as_slice()).unwrap();
|
||||
VERIFIER.get().unwrap().verify_batch_proof(&proof)
|
||||
}
|
||||
});
|
||||
verified.unwrap_or(false) as c_char
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn verify_bundle_proof(proof: *const c_char) -> c_char {
|
||||
let proof = c_char_to_vec(proof);
|
||||
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
|
||||
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_bundle_proof(proof));
|
||||
verified.unwrap_or(false) as c_char
|
||||
}
|
||||
@@ -1,63 +0,0 @@
|
||||
use crate::utils::{c_char_to_str, c_char_to_vec, panic_catch};
|
||||
use libc::c_char;
|
||||
use prover_v3::{zkevm::Verifier as VerifierLoVersion, ChunkProof as ChunkProofLoVersion};
|
||||
use prover_v4::{
|
||||
utils::init_env_and_log, zkevm::Verifier as VerifierHiVersion,
|
||||
ChunkProof as ChunkProofHiVersion,
|
||||
};
|
||||
use std::{cell::OnceCell, env};
|
||||
|
||||
static mut VERIFIER_LO_VERSION: OnceCell<VerifierLoVersion> = OnceCell::new();
|
||||
static mut VERIFIER_HI_VERSION: OnceCell<VerifierHiVersion> = OnceCell::new();
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn init_chunk_verifier(
|
||||
params_dir: *const c_char,
|
||||
v3_assets_dir: *const c_char,
|
||||
v4_assets_dir: *const c_char,
|
||||
) {
|
||||
init_env_and_log("ffi_chunk_verify");
|
||||
|
||||
let params_dir = c_char_to_str(params_dir);
|
||||
let v3_assets_dir = c_char_to_str(v3_assets_dir);
|
||||
let v4_assets_dir = c_char_to_str(v4_assets_dir);
|
||||
|
||||
// TODO: add a setting in scroll-prover.
|
||||
env::set_var("SCROLL_PROVER_ASSETS_DIR", v3_assets_dir);
|
||||
let verifier_lo = VerifierLoVersion::from_dirs(params_dir, v3_assets_dir);
|
||||
env::set_var("SCROLL_PROVER_ASSETS_DIR", v4_assets_dir);
|
||||
let verifier_hi = VerifierHiVersion::from_dirs(params_dir, v4_assets_dir);
|
||||
|
||||
VERIFIER_LO_VERSION.set(verifier_lo).unwrap();
|
||||
VERIFIER_HI_VERSION.set(verifier_hi).unwrap();
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn verify_chunk_proof(
|
||||
proof: *const c_char,
|
||||
fork_name: *const c_char,
|
||||
) -> c_char {
|
||||
let proof = c_char_to_vec(proof);
|
||||
|
||||
let fork_name_str = c_char_to_str(fork_name);
|
||||
let fork_id = match fork_name_str {
|
||||
"curie" => 3,
|
||||
"darwin" => 4,
|
||||
_ => {
|
||||
log::warn!("unexpected fork_name {fork_name_str}, treated as darwin");
|
||||
4
|
||||
}
|
||||
};
|
||||
let verified = panic_catch(|| {
|
||||
if fork_id == 3 {
|
||||
let proof = serde_json::from_slice::<ChunkProofLoVersion>(proof.as_slice()).unwrap();
|
||||
VERIFIER_LO_VERSION.get().unwrap().verify_chunk_proof(proof)
|
||||
} else {
|
||||
let proof = serde_json::from_slice::<ChunkProofHiVersion>(proof.as_slice()).unwrap();
|
||||
VERIFIER_HI_VERSION.get().unwrap().verify_chunk_proof(proof)
|
||||
}
|
||||
});
|
||||
verified.unwrap_or(false) as c_char
|
||||
}
|
||||
@@ -1,4 +1,76 @@
|
||||
mod batch;
|
||||
mod chunk;
|
||||
mod types;
|
||||
mod utils;
|
||||
mod verifier;
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use crate::utils::{c_char_to_str, c_char_to_vec};
|
||||
use libc::c_char;
|
||||
use verifier::{TaskType, VerifierConfig};
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn init(config: *const c_char) {
|
||||
let config_str = c_char_to_str(config);
|
||||
let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
|
||||
verifier::init(verifier_config);
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn verify_chunk_proof(
|
||||
proof: *const c_char,
|
||||
fork_name: *const c_char,
|
||||
) -> c_char {
|
||||
verify_proof(proof, fork_name, TaskType::Chunk)
|
||||
}
|
||||
|
||||
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
|
||||
let fork_name_str = c_char_to_str(fork_name);
|
||||
let proof = c_char_to_vec(proof);
|
||||
let verifier = verifier::get_verifier(fork_name_str);
|
||||
|
||||
if let Err(e) = verifier {
|
||||
log::warn!("failed to get verifier, error: {:#}", e);
|
||||
return 0 as c_char;
|
||||
}
|
||||
match verifier.unwrap().verify(task_type, proof) {
|
||||
Err(e) => {
|
||||
log::error!("{:?} verify failed, error: {:#}", task_type, e);
|
||||
false as c_char
|
||||
}
|
||||
Ok(result) => result as c_char,
|
||||
}
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn verify_batch_proof(
|
||||
proof: *const c_char,
|
||||
fork_name: *const c_char,
|
||||
) -> c_char {
|
||||
verify_proof(proof, fork_name, TaskType::Batch)
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn verify_bundle_proof(
|
||||
proof: *const c_char,
|
||||
fork_name: *const c_char,
|
||||
) -> c_char {
|
||||
verify_proof(proof, fork_name, TaskType::Bundle)
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
|
||||
_dump_vk(fork_name, file);
|
||||
}
|
||||
|
||||
fn _dump_vk(fork_name: *const c_char, file: *const c_char) {
|
||||
let fork_name_str = c_char_to_str(fork_name);
|
||||
let verifier = verifier::get_verifier(fork_name_str);
|
||||
|
||||
if let Ok(verifier) = verifier {
|
||||
verifier.as_ref().dump_vk(Path::new(c_char_to_str(file)));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
// Represents the result of a chunk proof checking operation.
|
||||
// `ok` indicates whether the proof checking was successful.
|
||||
// `error` provides additional details in case the check failed.
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct CheckChunkProofsResponse {
|
||||
pub ok: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub error: Option<String>,
|
||||
}
|
||||
|
||||
// Encapsulates the result from generating a proof.
|
||||
// `message` holds the generated proof in byte slice format.
|
||||
// `error` provides additional details in case the proof generation failed.
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct ProofResult {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub message: Option<Vec<u8>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub error: Option<String>,
|
||||
}
|
||||
common/libzkp/impl/src/verifier.rs (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
#![allow(static_mut_refs)]
|
||||
|
||||
mod euclidv2;
|
||||
use anyhow::{bail, Result};
|
||||
use euclidv2::EuclidV2Verifier;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{cell::OnceCell, path::Path, rc::Rc};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub enum TaskType {
|
||||
Chunk,
|
||||
Batch,
|
||||
Bundle,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct VKDump {
|
||||
pub chunk_vk: String,
|
||||
pub batch_vk: String,
|
||||
pub bundle_vk: String,
|
||||
}
|
||||
|
||||
pub trait ProofVerifier {
|
||||
fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
|
||||
fn dump_vk(&self, file: &Path);
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct CircuitConfig {
|
||||
pub fork_name: String,
|
||||
pub assets_path: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct VerifierConfig {
|
||||
pub high_version_circuit: CircuitConfig,
|
||||
}
|
||||
|
||||
type HardForkName = String;
|
||||
|
||||
struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
|
||||
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
|
||||
|
||||
pub fn init(config: VerifierConfig) {
|
||||
let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
|
||||
unsafe {
|
||||
VERIFIER_HIGH
|
||||
.set(VerifierPair(
|
||||
config.high_version_circuit.fork_name,
|
||||
Rc::new(Box::new(verifier)),
|
||||
))
|
||||
.unwrap_unchecked();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
|
||||
unsafe {
|
||||
if let Some(verifier) = VERIFIER_HIGH.get() {
|
||||
if verifier.0 == fork_name {
|
||||
return Ok(verifier.1.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
bail!("failed to get verifier, key not found, {}", fork_name)
|
||||
}
|
||||
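init expects a JSON document shaped like VerifierConfig above: a single high_version_circuit object carrying fork_name and assets_path. A minimal Go sketch of building that string on the caller's side follows; the struct and function names are illustrative, and the exact fork string is an assumption rather than something this diff pins down.

package verifier

import "encoding/json"

// circuitConfig mirrors the Rust CircuitConfig fields read by libzkp's init.
type circuitConfig struct {
	ForkName   string `json:"fork_name"`
	AssetsPath string `json:"assets_path"`
}

// verifierConfig mirrors the Rust VerifierConfig.
type verifierConfig struct {
	HighVersionCircuit circuitConfig `json:"high_version_circuit"`
}

// buildInitConfig renders the JSON string handed to the FFI init call.
// The fork string must match the fork_name later passed to get_verifier.
func buildInitConfig(assetsPath string) (string, error) {
	cfg := verifierConfig{
		HighVersionCircuit: circuitConfig{
			ForkName:   "euclidV2", // assumed spelling; see the fork-name constants in the message package
			AssetsPath: assetsPath,
		},
	}
	b, err := json.Marshal(cfg)
	return string(b), err
}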
common/libzkp/impl/src/verifier/euclidv2.rs (new file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
use super::{ProofVerifier, TaskType, VKDump};
|
||||
|
||||
use anyhow::Result;
|
||||
|
||||
use crate::utils::panic_catch;
|
||||
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
|
||||
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
|
||||
use std::{fs::File, path::Path};
|
||||
|
||||
pub struct EuclidV2Verifier {
|
||||
chunk_verifier: ChunkVerifier,
|
||||
batch_verifier: BatchVerifier,
|
||||
bundle_verifier: BundleVerifierEuclidV2,
|
||||
}
|
||||
|
||||
impl EuclidV2Verifier {
|
||||
pub fn new(assets_dir: &str) -> Self {
|
||||
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
|
||||
let config = Path::new(assets_dir).join("root-verifier-vm-config");
|
||||
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
|
||||
|
||||
Self {
|
||||
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
|
||||
.expect("Setting up chunk verifier"),
|
||||
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
|
||||
.expect("Setting up batch verifier"),
|
||||
bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
|
||||
.expect("Setting up bundle verifier"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ProofVerifier for EuclidV2Verifier {
|
||||
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
|
||||
panic_catch(|| match task_type {
|
||||
TaskType::Chunk => {
|
||||
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
|
||||
self.chunk_verifier
|
||||
.verify_proof(proof.proof.as_root_proof().unwrap())
|
||||
}
|
||||
TaskType::Batch => {
|
||||
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
|
||||
self.batch_verifier
|
||||
.verify_proof(proof.proof.as_root_proof().unwrap())
|
||||
}
|
||||
TaskType::Bundle => {
|
||||
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
|
||||
self.bundle_verifier
|
||||
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
|
||||
}
|
||||
})
|
||||
.map_err(|err_str: String| anyhow::anyhow!(err_str))
|
||||
}
|
||||
|
||||
fn dump_vk(&self, file: &Path) {
|
||||
let f = File::create(file).expect("Failed to open file to dump VK");
|
||||
|
||||
let dump = VKDump {
|
||||
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
|
||||
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
|
||||
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
|
||||
};
|
||||
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
|
||||
}
|
||||
}
|
||||
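dump_vk above serializes a VKDump with base64-encoded chunk, batch and bundle app verification keys. As a hedged sketch (names are illustrative), a Go caller could read such a dump back, for example to compare against the vk field carried inside submitted proofs:

package verifier

import (
	"encoding/base64"
	"encoding/json"
	"os"
)

// vkDump mirrors the JSON written by libzkp's dump_vk.
type vkDump struct {
	ChunkVK  string `json:"chunk_vk"`
	BatchVK  string `json:"batch_vk"`
	BundleVK string `json:"bundle_vk"`
}

// readChunkVK loads a VK dump file and returns the raw chunk verification key bytes.
func readChunkVK(path string) ([]byte, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var dump vkDump
	if err := json.Unmarshal(raw, &dump); err != nil {
		return nil, err
	}
	// base64::encode on the Rust side uses the standard alphabet with padding.
	return base64.StdEncoding.DecodeString(dump.ChunkVK)
}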
@@ -1,11 +1,12 @@
|
||||
// BatchVerifier is used to:
|
||||
// - Verify a batch proof
|
||||
// - Verify a bundle proof
|
||||
void init_batch_verifier(char* params_dir, char* assets_dir);
|
||||
void init(char* config);
|
||||
|
||||
char verify_batch_proof(char* proof, char* fork_name);
|
||||
|
||||
char verify_bundle_proof(char* proof);
|
||||
char verify_bundle_proof(char* proof, char* fork_name);
|
||||
|
||||
void init_chunk_verifier(char* params_dir, char* v3_assets_dir, char* v4_assets_dir);
|
||||
char verify_chunk_proof(char* proof, char* fork_name);
|
||||
|
||||
void dump_vk(char* fork_name, char* file);
|
||||
|
||||
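The header now exposes a single init(config) entry point plus fork-aware verify_*_proof functions returning a char (1 means verified). The following Go cgo sketch shows how a caller could wrap two of them; the header name libzkp.h and the linker flags are assumptions for illustration, not this repository's actual build setup.

package verifier

/*
#cgo LDFLAGS: -lzkp -lm -ldl
#include <stdlib.h>
#include "libzkp.h"   // assumed header exposing init and verify_*_proof
*/
import "C"

import "unsafe"

// InitVerifier passes the JSON-encoded verifier config to the Rust library once at startup.
func InitVerifier(configJSON string) {
	cCfg := C.CString(configJSON)
	defer C.free(unsafe.Pointer(cCfg))
	C.init(cCfg)
}

// VerifyBundleProof reports whether the Rust side accepts the bundle proof for the given fork.
// Note the new signature: fork_name is now required for bundle proofs as well.
func VerifyBundleProof(proofJSON []byte, forkName string) bool {
	cProof := C.CString(string(proofJSON))
	defer C.free(unsafe.Pointer(cProof))
	cFork := C.CString(forkName)
	defer C.free(unsafe.Pointer(cFork))
	return C.verify_bundle_proof(cProof, cFork) != 0
}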
@@ -21,9 +21,10 @@ import (
|
||||
|
||||
// TestcontainerApps testcontainers struct
|
||||
type TestcontainerApps struct {
|
||||
postgresContainer *postgres.PostgresContainer
|
||||
l2GethContainer *testcontainers.DockerContainer
|
||||
poSL1Container compose.ComposeStack
|
||||
postgresContainer *postgres.PostgresContainer
|
||||
l2GethContainer *testcontainers.DockerContainer
|
||||
poSL1Container compose.ComposeStack
|
||||
web3SignerContainer *testcontainers.DockerContainer
|
||||
|
||||
// common timestamp in nanoseconds.
|
||||
Timestamp int
|
||||
@@ -112,6 +113,47 @@ func (t *TestcontainerApps) StartPoSL1Container() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TestcontainerApps) StartWeb3SignerContainer(chainId int) error {
|
||||
if t.web3SignerContainer != nil && t.web3SignerContainer.IsRunning() {
|
||||
return nil
|
||||
}
|
||||
var (
|
||||
err error
|
||||
rootDir string
|
||||
)
|
||||
if rootDir, err = findProjectRootDir(); err != nil {
|
||||
return fmt.Errorf("failed to find project root directory: %v", err)
|
||||
}
|
||||
|
||||
// web3signerconf/keyconf.yaml may contain multiple configured keys; web3signer then chooses the one corresponding to the tx's "from" field
|
||||
web3SignerConfDir := filepath.Join(rootDir, "common", "testcontainers", "web3signerconf")
|
||||
|
||||
req := testcontainers.ContainerRequest{
|
||||
Image: "consensys/web3signer:develop",
|
||||
ExposedPorts: []string{"9000/tcp"},
|
||||
Cmd: []string{"--key-config-path", "/web3signerconf/", "eth1", "--chain-id", fmt.Sprintf("%d", chainId)},
|
||||
Files: []testcontainers.ContainerFile{
|
||||
{
|
||||
HostFilePath: web3SignerConfDir,
|
||||
ContainerFilePath: "/",
|
||||
FileMode: 0o777,
|
||||
},
|
||||
},
|
||||
WaitingFor: wait.ForLog("ready to handle signing requests"),
|
||||
}
|
||||
genericContainerReq := testcontainers.GenericContainerRequest{
|
||||
ContainerRequest: req,
|
||||
Started: true,
|
||||
}
|
||||
container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
|
||||
if err != nil {
|
||||
log.Printf("failed to start web3signer container: %s", err)
|
||||
return err
|
||||
}
|
||||
t.web3SignerContainer, _ = container.(*testcontainers.DockerContainer)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetPoSL1EndPoint returns the endpoint of the running PoS L1 endpoint
|
||||
func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
|
||||
if t.poSL1Container == nil {
|
||||
@@ -153,6 +195,14 @@ func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
// GetWeb3SignerEndpoint returns the endpoint of the running web3signer container
|
||||
func (t *TestcontainerApps) GetWeb3SignerEndpoint() (string, error) {
|
||||
if t.web3SignerContainer == nil || !t.web3SignerContainer.IsRunning() {
|
||||
return "", errors.New("web3signer is not running")
|
||||
}
|
||||
return t.web3SignerContainer.PortEndpoint(context.Background(), "9000/tcp", "http")
|
||||
}
|
||||
|
||||
// GetGormDBClient returns a gorm.DB by connecting to the running postgres container
|
||||
func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
|
||||
endpoint, err := t.GetDBEndPoint()
|
||||
@@ -201,6 +251,11 @@ func (t *TestcontainerApps) Free() {
|
||||
t.poSL1Container = nil
|
||||
}
|
||||
}
|
||||
if t.web3SignerContainer != nil && t.web3SignerContainer.IsRunning() {
|
||||
if err := t.web3SignerContainer.Terminate(ctx); err != nil {
|
||||
log.Printf("failed to stop web3signer container: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// findProjectRootDir find project root directory
|
||||
|
||||
@@ -44,6 +44,11 @@ func TestNewTestcontainerApps(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, ethclient)
|
||||
|
||||
assert.NoError(t, testApps.StartWeb3SignerContainer(1))
|
||||
endpoint, err = testApps.GetWeb3SignerEndpoint()
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, endpoint)
|
||||
|
||||
// test free testcontainers
|
||||
testApps.Free()
|
||||
endpoint, err = testApps.GetDBEndPoint()
|
||||
@@ -57,4 +62,8 @@ func TestNewTestcontainerApps(t *testing.T) {
|
||||
endpoint, err = testApps.GetPoSL1EndPoint()
|
||||
assert.EqualError(t, err, "PoS L1 container is not running")
|
||||
assert.Empty(t, endpoint)
|
||||
|
||||
endpoint, err = testApps.GetWeb3SignerEndpoint()
|
||||
assert.EqualError(t, err, "web3signer is not running")
|
||||
assert.Empty(t, endpoint)
|
||||
}
|
||||
|
||||
common/testcontainers/web3signerconf/keyconf.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
type: "file-raw"
|
||||
keyType: "SECP256K1"
|
||||
privateKey: "0x1313131313131313131313131313131313131313131313131313131313131313"
|
||||
---
|
||||
type: "file-raw"
|
||||
keyType: "SECP256K1"
|
||||
privateKey: "0x1212121212121212121212121212121212121212121212121212121212121212"
|
||||
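With two raw keys configured as above, web3signer (run in eth1 mode by the container command) picks the signing key matching a transaction's from address. As a rough, hedged sketch using the standard JSON-RPC shape rather than any client code from this diff, a test could list the loaded accounts through the container endpoint:

package testcontainers

import (
	"bytes"
	"encoding/json"
	"net/http"
)

// listWeb3SignerAccounts asks the signer which addresses its key files expose,
// e.g. to pick a "from" address before submitting a transaction to be signed.
func listWeb3SignerAccounts(endpoint string) ([]string, error) {
	reqBody := []byte(`{"jsonrpc":"2.0","id":1,"method":"eth_accounts","params":[]}`)
	resp, err := http.Post(endpoint, "application/json", bytes.NewReader(reqBody))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var rpcResp struct {
		Result []string `json:"result"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&rpcResp); err != nil {
		return nil, err
	}
	return rpcResp.Result, nil
}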
common/testdata/blobdata.json (vendored, new file, 4 lines): File diff suppressed because one or more lines are too long
@@ -276,8 +276,8 @@ const (
|
||||
SenderTypeFinalizeBatch
|
||||
// SenderTypeL1GasOracle indicates a sender from L2 responsible for updating L1 gas prices.
|
||||
SenderTypeL1GasOracle
|
||||
// SenderTypeL2GasOracle indicates a sender from L1 responsible for updating L2 gas prices.
|
||||
SenderTypeL2GasOracle
|
||||
// SenderTypeL2GasOracleDeprecated indicates a sender from L1 responsible for updating L2 gas prices, which is deprecated.
|
||||
SenderTypeL2GasOracleDeprecated
|
||||
)
|
||||
|
||||
// String returns a string representation of the SenderType.
|
||||
@@ -289,8 +289,8 @@ func (t SenderType) String() string {
|
||||
return "SenderTypeFinalizeBatch"
|
||||
case SenderTypeL1GasOracle:
|
||||
return "SenderTypeL1GasOracle"
|
||||
case SenderTypeL2GasOracle:
|
||||
return "SenderTypeL2GasOracle"
|
||||
case SenderTypeL2GasOracleDeprecated:
|
||||
return "SenderTypeL2GasOracleDeprecated"
|
||||
default:
|
||||
return fmt.Sprintf("Unknown SenderType (%d)", int32(t))
|
||||
}
|
||||
@@ -326,3 +326,53 @@ func (s TxStatus) String() string {
|
||||
return fmt.Sprintf("Unknown TxStatus (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
// BlobUploadStatus represents the status of a blob upload
|
||||
type BlobUploadStatus int
|
||||
|
||||
const (
|
||||
// BlobUploadStatusUndefined indicates an undefined status
|
||||
BlobUploadStatusUndefined BlobUploadStatus = iota
|
||||
// BlobUploadStatusPending indicates a pending upload status
|
||||
BlobUploadStatusPending
|
||||
// BlobUploadStatusUploaded indicates a successful upload status
|
||||
BlobUploadStatusUploaded
|
||||
// BlobUploadStatusFailed indicates a failed upload status
|
||||
BlobUploadStatusFailed
|
||||
)
|
||||
|
||||
func (s BlobUploadStatus) String() string {
|
||||
switch s {
|
||||
case BlobUploadStatusPending:
|
||||
return "BlobUploadStatusPending"
|
||||
case BlobUploadStatusUploaded:
|
||||
return "BlobUploadStatusUploaded"
|
||||
case BlobUploadStatusFailed:
|
||||
return "BlobUploadStatusFailed"
|
||||
default:
|
||||
return fmt.Sprintf("Unknown BlobUploadStatus (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
// BlobStoragePlatform represents the platform a blob is uploaded to
|
||||
type BlobStoragePlatform int
|
||||
|
||||
const (
|
||||
// BlobStoragePlatformUndefined indicates an undefined platform
|
||||
BlobStoragePlatformUndefined BlobStoragePlatform = iota
|
||||
// BlobStoragePlatformS3 represents AWS S3
|
||||
BlobStoragePlatformS3
|
||||
// BlobStoragePlatformArweave represents storage blockchain Arweave
|
||||
BlobStoragePlatformArweave
|
||||
)
|
||||
|
||||
func (s BlobStoragePlatform) String() string {
|
||||
switch s {
|
||||
case BlobStoragePlatformS3:
|
||||
return "BlobStoragePlatformS3"
|
||||
case BlobStoragePlatformArweave:
|
||||
return "BlobStoragePlatformArweave"
|
||||
default:
|
||||
return fmt.Sprintf("Unknown BlobStoragePlatform (%d)", int32(s))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -173,9 +173,9 @@ func TestSenderType(t *testing.T) {
|
||||
"SenderTypeL1GasOracle",
|
||||
},
|
||||
{
|
||||
"SenderTypeL2GasOracle",
|
||||
SenderTypeL2GasOracle,
|
||||
"SenderTypeL2GasOracle",
|
||||
"SenderTypeL2GasOracleDeprecated",
|
||||
SenderTypeL2GasOracleDeprecated,
|
||||
"SenderTypeL2GasOracleDeprecated",
|
||||
},
|
||||
{
|
||||
"Invalid Value",
|
||||
|
||||
@@ -1,21 +1,19 @@
|
||||
package message
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv3"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
// RespStatus represents status code from prover to scroll
|
||||
type RespStatus uint32
|
||||
|
||||
const (
|
||||
// StatusOk means generate proof success
|
||||
StatusOk RespStatus = iota
|
||||
// StatusProofError means generate proof failed
|
||||
StatusProofError
|
||||
EuclidV2Fork = "euclidV2"
|
||||
|
||||
EuclidV2ForkNameForProver = "euclidv2"
|
||||
)
|
||||
|
||||
// ProofType represents the type of task.
|
||||
@@ -45,118 +43,263 @@ const (
|
||||
ProofTypeBundle
|
||||
)
|
||||
|
||||
// ChunkTaskDetail is a type containing ChunkTask detail.
|
||||
// ChunkTaskDetail contains the detail of a chunk proving task.
|
||||
type ChunkTaskDetail struct {
|
||||
BlockHashes []common.Hash `json:"block_hashes"`
|
||||
// use one of the string of "euclidv1" / "euclidv2"
|
||||
ForkName string `json:"fork_name"`
|
||||
BlockHashes []common.Hash `json:"block_hashes"`
|
||||
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
|
||||
}
|
||||
|
||||
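A chunk task travels to the prover as JSON. The sketch below stays within this package and uses only the fields defined above; it shows roughly what the coordinator side might produce for EuclidV2, with placeholder hash values.

package message

import (
	"encoding/json"

	"github.com/scroll-tech/go-ethereum/common"
)

// exampleChunkTask shows the wire format a prover receives for a chunk task.
func exampleChunkTask() ([]byte, error) {
	task := ChunkTaskDetail{
		ForkName: EuclidV2ForkNameForProver, // prover-facing spelling per the constant above
		BlockHashes: []common.Hash{
			common.HexToHash("0x01"), // placeholder block hashes
			common.HexToHash("0x02"),
		},
		PrevMsgQueueHash: common.Hash{}, // placeholder
	}
	return json.Marshal(task)
}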
// Byte48 is a hex-encoded big integer with a fixed length of 48 bytes.
|
||||
type Byte48 struct {
|
||||
hexutil.Big
|
||||
}
|
||||
|
||||
func (e Byte48) MarshalText() ([]byte, error) {
|
||||
i := e.ToInt()
|
||||
// override the default big-integer encoding to emit a fixed 48-byte width
|
||||
if sign := i.Sign(); sign < 0 {
|
||||
// sanity check
|
||||
return nil, errors.New("Byte48 must be positive integer")
|
||||
} else {
|
||||
s := i.Text(16)
|
||||
if len(s) > 96 {
|
||||
return nil, errors.New("integer Exceed 384bit")
|
||||
}
|
||||
return []byte(fmt.Sprintf("0x%0*s", 96, s)), nil
|
||||
}
|
||||
}
|
||||
|
||||
func isString(input []byte) bool {
|
||||
return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"'
|
||||
}
|
||||
|
||||
// hexutil.Big has a limitation of 256 bits, so we have to override it.
|
||||
func (e *Byte48) UnmarshalJSON(input []byte) error {
|
||||
if !isString(input) {
|
||||
return errors.New("not hex string")
|
||||
}
|
||||
|
||||
b, err := hexutil.Decode(string(input[1 : len(input)-1]))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(b) != 48 {
|
||||
return fmt.Errorf("not a 48 bytes hex string: %d", len(b))
|
||||
}
|
||||
var dec big.Int
|
||||
dec.SetBytes(b)
|
||||
*e = Byte48{(hexutil.Big)(dec)}
|
||||
return nil
|
||||
}
|
||||
|
||||
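Byte48 exists because KZG commitments and proofs are 48-byte (384-bit) values, wider than hexutil.Big's 256-bit limit. The following sketch uses only the methods defined above and shows the JSON round trip:

package message

import (
	"encoding/json"
	"strings"
)

// byte48RoundTrip shows that a 48-byte value (96 hex chars) survives the
// fixed-width JSON encoding defined above.
func byte48RoundTrip() (string, error) {
	in := `"0x` + strings.Repeat("00", 47) + `2a"` // exactly 48 bytes once decoded
	var v Byte48
	if err := json.Unmarshal([]byte(in), &v); err != nil {
		return "", err
	}
	out, err := json.Marshal(v) // re-encoded as a 0x-prefixed, zero-padded 96-char hex string
	return string(out), err
}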
// BatchTaskDetail is a type containing BatchTask detail.
|
||||
type BatchTaskDetail struct {
|
||||
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
|
||||
ChunkProofs []*ChunkProof `json:"chunk_proofs"`
|
||||
BatchHeader *codecv3.DABatch `json:"batch_header"`
|
||||
// use one of the string of "euclidv1" / "euclidv2"
|
||||
ForkName string `json:"fork_name"`
|
||||
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
|
||||
ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
|
||||
BatchHeader interface{} `json:"batch_header"`
|
||||
BlobBytes []byte `json:"blob_bytes"`
|
||||
KzgProof Byte48 `json:"kzg_proof,omitempty"`
|
||||
KzgCommitment Byte48 `json:"kzg_commitment,omitempty"`
|
||||
ChallengeDigest common.Hash `json:"challenge_digest,omitempty"`
|
||||
}
|
||||
|
||||
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
|
||||
type BundleTaskDetail struct {
|
||||
BatchProofs []*BatchProof `json:"batch_proofs"`
|
||||
// use one of the string of "euclidv1" / "euclidv2"
|
||||
ForkName string `json:"fork_name"`
|
||||
BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
|
||||
BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
|
||||
}
|
||||
|
||||
// ChunkInfo is for calculating pi_hash for chunk
|
||||
type ChunkInfo struct {
|
||||
ChainID uint64 `json:"chain_id"`
|
||||
PrevStateRoot common.Hash `json:"prev_state_root"`
|
||||
PostStateRoot common.Hash `json:"post_state_root"`
|
||||
WithdrawRoot common.Hash `json:"withdraw_root"`
|
||||
DataHash common.Hash `json:"data_hash"`
|
||||
IsPadding bool `json:"is_padding"`
|
||||
TxBytes []byte `json:"tx_bytes"`
|
||||
TxBytesHash common.Hash `json:"tx_data_digest"`
|
||||
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
|
||||
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
|
||||
TxDataLength uint64 `json:"tx_data_length"`
|
||||
InitialBlockNumber uint64 `json:"initial_block_number"`
|
||||
BlockCtxs []BlockContextV2 `json:"block_ctxs"`
|
||||
}
|
||||
|
||||
// BlockContextV2 is the block context for euclid v2
|
||||
type BlockContextV2 struct {
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
BaseFee hexutil.Big `json:"base_fee"`
|
||||
GasLimit uint64 `json:"gas_limit"`
|
||||
NumTxs uint16 `json:"num_txs"`
|
||||
NumL1Msgs uint16 `json:"num_l1_msgs"`
|
||||
}
|
||||
|
||||
// OpenVMProof is the flattened VM proof
|
||||
type OpenVMProof struct {
|
||||
Proof []byte `json:"proofs"`
|
||||
PublicValues []byte `json:"public_values"`
|
||||
}
|
||||
|
||||
// OpenVMEvmProof is the flattened EVM proof
|
||||
type OpenVMEvmProof struct {
|
||||
Proof []byte `json:"proof"`
|
||||
Instances []byte `json:"instances"`
|
||||
}
|
||||
|
||||
// OpenVMChunkProof includes the proof info that is required for chunk verification and rollup.
|
||||
type OpenVMChunkProof struct {
|
||||
MetaData struct {
|
||||
ChunkInfo *ChunkInfo `json:"chunk_info"`
|
||||
} `json:"metadata"`
|
||||
|
||||
VmProof *OpenVMProof `json:"proof"`
|
||||
Vk []byte `json:"vk,omitempty"`
|
||||
GitVersion string `json:"git_version,omitempty"`
|
||||
}
|
||||
|
||||
func (p *OpenVMChunkProof) Proof() []byte {
|
||||
proofJson, err := json.Marshal(p.VmProof)
|
||||
if err != nil {
|
||||
panic(fmt.Sprint("marshaling error", err))
|
||||
}
|
||||
|
||||
return proofJson
|
||||
}
|
||||
|
||||
// OpenVMBatchInfo is for calculating pi_hash for batch header
|
||||
type OpenVMBatchInfo struct {
|
||||
ParentBatchHash common.Hash `json:"parent_batch_hash"`
|
||||
ParentStateRoot common.Hash `json:"parent_state_root"`
|
||||
StateRoot common.Hash `json:"state_root"`
|
||||
WithdrawRoot common.Hash `json:"withdraw_root"`
|
||||
BatchHash common.Hash `json:"batch_hash"`
|
||||
ChainID uint64 `json:"chain_id"`
|
||||
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
|
||||
PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
|
||||
}
|
||||
|
||||
// OpenVMBatchProof includes the proof info that is required for batch verification and rollup.
|
||||
type OpenVMBatchProof struct {
|
||||
MetaData struct {
|
||||
BatchInfo *OpenVMBatchInfo `json:"batch_info"`
|
||||
BatchHash common.Hash `json:"batch_hash"`
|
||||
} `json:"metadata"`
|
||||
|
||||
VmProof *OpenVMProof `json:"proof"`
|
||||
Vk []byte `json:"vk,omitempty"`
|
||||
GitVersion string `json:"git_version,omitempty"`
|
||||
}
|
||||
|
||||
func (p *OpenVMBatchProof) Proof() []byte {
|
||||
proofJson, err := json.Marshal(p.VmProof)
|
||||
if err != nil {
|
||||
panic(fmt.Sprint("marshaling error", err))
|
||||
}
|
||||
|
||||
return proofJson
|
||||
}
|
||||
|
||||
// SanityCheck checks whether a BatchProof is in a legal format
|
||||
func (ap *OpenVMBatchProof) SanityCheck() error {
|
||||
if ap == nil {
|
||||
return errors.New("agg_proof is nil")
|
||||
}
|
||||
if ap.MetaData.BatchInfo == nil {
|
||||
return errors.New("batch info not ready")
|
||||
}
|
||||
|
||||
if ap.VmProof == nil {
|
||||
return errors.New("proof not ready")
|
||||
} else {
|
||||
if len(ap.Vk) == 0 {
|
||||
return errors.New("vk not ready")
|
||||
}
|
||||
pf := ap.VmProof
|
||||
if pf.Proof == nil {
|
||||
return errors.New("proof data not ready")
|
||||
}
|
||||
if len(pf.PublicValues) == 0 {
|
||||
return errors.New("proof public value not ready")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
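As a usage sketch of the checks above: a consumer would typically decode a stored proof and run SanityCheck before relaying it. loadBatchProof is a hypothetical helper (not part of this change) living in the same message package, using only encoding/json and fmt, both already imported by the file:

// loadBatchProof is a hypothetical helper: decode a stored proof, then reject it early
// if any field required by SanityCheck (metadata, VM proof, vk, public values) is missing.
func loadBatchProof(stored []byte) (*OpenVMBatchProof, error) {
	var proof OpenVMBatchProof
	if err := json.Unmarshal(stored, &proof); err != nil {
		return nil, fmt.Errorf("unmarshal batch proof: %w", err)
	}
	if err := proof.SanityCheck(); err != nil {
		return nil, fmt.Errorf("invalid batch proof: %w", err)
	}
	return &proof, nil
}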
|
||||
|
||||
// OpenVMBundleInfo is for calculating pi_hash for bundle header
|
||||
type OpenVMBundleInfo struct {
|
||||
ChainID uint64 `json:"chain_id"`
|
||||
PrevStateRoot common.Hash `json:"prev_state_root"`
|
||||
PostStateRoot common.Hash `json:"post_state_root"`
|
||||
WithdrawRoot common.Hash `json:"withdraw_root"`
|
||||
DataHash common.Hash `json:"data_hash"`
|
||||
IsPadding bool `json:"is_padding"`
|
||||
TxBytes []byte `json:"tx_bytes"`
|
||||
NumBatches uint32 `json:"num_batches"`
|
||||
PrevBatchHash common.Hash `json:"prev_batch_hash"`
|
||||
BatchHash common.Hash `json:"batch_hash"`
|
||||
MsgQueueHash common.Hash `json:"msg_queue_hash"`
|
||||
}
|
||||
|
||||
// SubCircuitRowUsage tracing info added in v0.11.0rc8
|
||||
type SubCircuitRowUsage struct {
|
||||
Name string `json:"name"`
|
||||
RowNumber uint64 `json:"row_number"`
|
||||
// OpenVMBundleProof includes the proof info that is required for verification of a bundle of batch proofs.
|
||||
type OpenVMBundleProof struct {
|
||||
MetaData struct {
|
||||
BundleInfo *OpenVMBundleInfo `json:"bundle_info"`
|
||||
BunndlePIHash common.Hash `json:"bundle_pi_hash"`
|
||||
} `json:"metadata"`
|
||||
|
||||
EvmProof *OpenVMEvmProof `json:"proof"`
|
||||
Vk []byte `json:"vk,omitempty"`
|
||||
GitVersion string `json:"git_version,omitempty"`
|
||||
}
|
||||
|
||||
// ChunkProof includes the proof info that is required for chunk verification and rollup.
|
||||
type ChunkProof struct {
|
||||
StorageTrace []byte `json:"storage_trace,omitempty"`
|
||||
Protocol []byte `json:"protocol"`
|
||||
Proof []byte `json:"proof"`
|
||||
Instances []byte `json:"instances"`
|
||||
Vk []byte `json:"vk"`
|
||||
// cross-reference between coordinator computation and prover computation
|
||||
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
|
||||
GitVersion string `json:"git_version,omitempty"`
|
||||
RowUsages []SubCircuitRowUsage `json:"row_usages,omitempty"`
|
||||
}
|
||||
|
||||
// BatchProof includes the proof info that is required for batch verification and rollup.
|
||||
type BatchProof struct {
|
||||
Protocol []byte `json:"protocol"`
|
||||
Proof []byte `json:"proof"`
|
||||
Instances []byte `json:"instances"`
|
||||
Vk []byte `json:"vk"`
|
||||
// cross-reference between coordinator computation and prover computation
|
||||
BatchHash common.Hash `json:"batch_hash"`
|
||||
GitVersion string `json:"git_version,omitempty"`
|
||||
}
|
||||
|
||||
// SanityCheck checks whether a BatchProof is in a legal format
|
||||
func (ap *BatchProof) SanityCheck() error {
|
||||
if ap == nil {
|
||||
return errors.New("agg_proof is nil")
|
||||
}
|
||||
|
||||
if len(ap.Proof) == 0 {
|
||||
return errors.New("proof not ready")
|
||||
}
|
||||
|
||||
if len(ap.Proof)%32 != 0 {
|
||||
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof))
|
||||
}
|
||||
|
||||
if len(ap.Instances) == 0 {
|
||||
return errors.New("instance not ready")
|
||||
}
|
||||
|
||||
if len(ap.Vk) == 0 {
|
||||
return errors.New("vk not ready")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// BundleProof includes the proof info that is required for verification of a bundle of batch proofs.
|
||||
type BundleProof struct {
|
||||
Proof []byte `json:"proof"`
|
||||
Instances []byte `json:"instances"`
|
||||
Vk []byte `json:"vk"`
|
||||
// cross-reference between coordinator computation and prover computation
|
||||
GitVersion string `json:"git_version,omitempty"`
|
||||
// Proof returns the proof bytes that are eventually passed as calldata for on-chain bundle proof verification.
|
||||
//
|
||||
// There are 12 accumulators for a SNARK proof. The accumulators are the first 12 elements of the EvmProof's
|
||||
// Instances field. The remaining items in Instances are supplied on-chain by the ScrollChain contract.
|
||||
//
|
||||
// The structure of these bytes is:
|
||||
// | byte index start | byte length | value | description |
|
||||
// |------------------|----------------|----------|---------------------|
|
||||
// | 0 | 32 | accs[0] | accumulator 1 |
|
||||
// | 32 | 32 | accs[1] | accumulator 2 |
|
||||
// | 32*i ... | 32 | accs[i] | accumulator i ... |
|
||||
// | 352 | 32 | accs[11] | accumulator 12 |
|
||||
// | 384 | dynamic | proof | proof bytes |
|
||||
func (p *OpenVMBundleProof) Proof() []byte {
|
||||
proofBytes := make([]byte, 0, 384+len(p.EvmProof.Proof))
|
||||
proofBytes = append(proofBytes, p.EvmProof.Instances[:384]...)
|
||||
return append(proofBytes, p.EvmProof.Proof...)
|
||||
}
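The layout documented above can be read back the same way it is written. splitBundleCalldata below is a hypothetical inverse (not part of this change), shown only to make the 12 x 32-byte accumulator prefix concrete:

// splitBundleCalldata is a hypothetical helper: the first 12*32 = 384 bytes are the
// accumulators, everything after byte 384 is the proof itself.
func splitBundleCalldata(calldata []byte) (accumulators [][]byte, proof []byte, err error) {
	const accBytes = 12 * 32
	if len(calldata) < accBytes {
		return nil, nil, fmt.Errorf("calldata too short: %d bytes", len(calldata))
	}
	for i := 0; i < 12; i++ {
		accumulators = append(accumulators, calldata[i*32:(i+1)*32])
	}
	return accumulators, calldata[accBytes:], nil
}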
|
||||
|
||||
// SanityCheck checks whether a BundleProof is in a legal format
|
||||
func (ap *BundleProof) SanityCheck() error {
|
||||
func (ap *OpenVMBundleProof) SanityCheck() error {
|
||||
if ap == nil {
|
||||
return errors.New("agg_proof is nil")
|
||||
}
|
||||
|
||||
if len(ap.Proof) == 0 {
|
||||
if ap.MetaData.BundleInfo == nil {
|
||||
return errors.New("bundle info not ready")
|
||||
}
|
||||
|
||||
if ap.EvmProof == nil {
|
||||
return errors.New("proof not ready")
|
||||
}
|
||||
} else {
|
||||
if len(ap.Vk) == 0 {
|
||||
return errors.New("vk not ready")
|
||||
}
|
||||
pf := ap.EvmProof
|
||||
if len(pf.Proof)%32 != 0 {
|
||||
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(pf.Proof))
|
||||
}
|
||||
|
||||
if len(ap.Proof)%32 != 0 {
|
||||
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof))
|
||||
}
|
||||
|
||||
if len(ap.Instances) == 0 {
|
||||
return errors.New("instance not ready")
|
||||
}
|
||||
|
||||
if len(ap.Vk) == 0 {
|
||||
return errors.New("vk not ready")
|
||||
if len(pf.Instances) == 0 {
|
||||
return errors.New("instance not ready")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
common/types/message/message_test.go (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
package message
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBytes48(t *testing.T) {
|
||||
ti := &Byte48{}
|
||||
ti.UnmarshalText([]byte("0x1"))
|
||||
if s, err := ti.MarshalText(); err == nil {
|
||||
if len(s) != 98 {
|
||||
panic(fmt.Sprintf("wrong str: %s", s))
|
||||
}
|
||||
}
|
||||
ti.UnmarshalText([]byte("0x0"))
|
||||
if s, err := ti.MarshalText(); err == nil {
|
||||
if len(s) != 98 {
|
||||
panic(fmt.Sprintf("wrong str: %s", s))
|
||||
}
|
||||
}
|
||||
}
|
||||
common/utils/blob.go (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
)
|
||||
|
||||
// CalculateVersionedBlobHash calculates the kzg4844 versioned blob hash from a blob
|
||||
func CalculateVersionedBlobHash(blob kzg4844.Blob) ([32]byte, error) {
|
||||
// calculate kzg4844 commitment from blob
|
||||
commit, err := kzg4844.BlobToCommitment(&blob)
|
||||
if err != nil {
|
||||
return [32]byte{}, fmt.Errorf("failed to get blob commitment, err: %w", err)
|
||||
}
|
||||
|
||||
// calculate kzg4844 versioned blob hash from blob commitment
|
||||
hasher := sha256.New()
|
||||
vh := kzg4844.CalcBlobHashV1(hasher, &commit)
|
||||
|
||||
return vh, nil
|
||||
}
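The result follows the EIP-4844 convention: a 0x01 version byte followed by the truncated SHA-256 of the KZG commitment. A minimal usage sketch, assuming the package above is importable as scroll-tech/common/utils (consistent with the module layout in this change):

package main

import (
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"

	"scroll-tech/common/utils"
)

func main() {
	var blob kzg4844.Blob // an all-zero blob is enough for illustration
	vh, err := utils.CalculateVersionedBlobHash(blob)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("versioned blob hash: 0x%x\n", vh) // first byte is the 0x01 version prefix
}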
|
||||
common/utils/blob_test.go (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
)
|
||||
|
||||
type BlobData struct {
|
||||
VersionedBlobHash string `json:"versionedBlobHash"`
|
||||
BlobData string `json:"blobData"`
|
||||
}
|
||||
|
||||
// TestCalculateVersionedBlobHash tests the CalculateVersionedBlobHash function
|
||||
func TestCalculateVersionedBlobHash(t *testing.T) {
|
||||
// Read the test data
|
||||
data, err := os.ReadFile("../testdata/blobdata.json")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read blobdata.json: %v", err)
|
||||
}
|
||||
|
||||
var blobData BlobData
|
||||
if err := json.Unmarshal(data, &blobData); err != nil {
|
||||
t.Fatalf("Failed to parse blobdata.json: %v", err)
|
||||
}
|
||||
|
||||
blobBytes, err := hex.DecodeString(blobData.BlobData)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to decode blob data: %v", err)
|
||||
}
|
||||
|
||||
// Convert []byte to kzg4844.Blob
|
||||
var blob kzg4844.Blob
|
||||
copy(blob[:], blobBytes)
|
||||
|
||||
// Calculate the hash
|
||||
calculatedHashBytes, err := CalculateVersionedBlobHash(blob)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate versioned blob hash: %v", err)
|
||||
}
|
||||
|
||||
calculatedHash := hex.EncodeToString(calculatedHashBytes[:])
|
||||
|
||||
if calculatedHash != blobData.VersionedBlobHash {
|
||||
t.Fatalf("Hash mismatch: got %s, want %s", calculatedHash, blobData.VersionedBlobHash)
|
||||
}
|
||||
|
||||
}
|
||||
@@ -20,7 +20,11 @@ var (
|
||||
}
|
||||
// RollupRelayerFlags contains flags only used in rollup-relayer
|
||||
RollupRelayerFlags = []cli.Flag{
|
||||
&ImportGenesisFlag,
|
||||
&MinCodecVersionFlag,
|
||||
}
|
||||
// ProposerToolFlags contains flags only used in proposer tool
|
||||
ProposerToolFlags = []cli.Flag{
|
||||
&StartL2BlockFlag,
|
||||
}
|
||||
// ConfigFileFlag loads the JSON config file.
|
||||
ConfigFileFlag = cli.StringFlag{
|
||||
@@ -72,12 +76,6 @@ var (
|
||||
Category: "METRICS",
|
||||
Value: 6060,
|
||||
}
|
||||
// ImportGenesisFlag imports the genesis batch during startup
|
||||
ImportGenesisFlag = cli.BoolFlag{
|
||||
Name: "import-genesis",
|
||||
Usage: "Import genesis batch into L1 contract during startup",
|
||||
Value: false,
|
||||
}
|
||||
// ServicePortFlag is the port the service will listen on
|
||||
ServicePortFlag = cli.IntFlag{
|
||||
Name: "service.port",
|
||||
@@ -90,4 +88,16 @@ var (
|
||||
Usage: "Genesis file of the network",
|
||||
Value: "./conf/genesis.json",
|
||||
}
|
||||
// MinCodecVersionFlag defines the minimum codec version required for the chunk/batch/bundle proposers
|
||||
MinCodecVersionFlag = cli.UintFlag{
|
||||
Name: "min-codec-version",
|
||||
Usage: "Minimum required codec version for the chunk/batch/bundle proposers",
|
||||
Required: true,
|
||||
}
|
||||
// StartL2BlockFlag indicates the start L2 block number for proposer tool
|
||||
StartL2BlockFlag = cli.Uint64Flag{
|
||||
Name: "start-l2-block",
|
||||
Usage: "Start L2 block number for proposer tool",
|
||||
Value: 0,
|
||||
}
|
||||
)
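Since MinCodecVersionFlag is marked Required, any binary that registers RollupRelayerFlags has to be started with --min-codec-version. A hypothetical urfave/cli/v2 wiring sketch (the binary name and the flags' import path are assumptions, not taken from this change):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"

	// Assumed import path for the flag definitions above.
	"scroll-tech/rollup/internal/utils"
)

func main() {
	app := &cli.App{
		Name:  "rollup-relayer",
		Flags: append([]cli.Flag{&utils.ConfigFileFlag}, utils.RollupRelayerFlags...),
		Action: func(ctx *cli.Context) error {
			// --min-codec-version is Required, so this read always has an explicit value.
			fmt.Println("minimum codec version:", ctx.Uint("min-codec-version"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}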
|
||||
|
||||
@@ -9,6 +9,10 @@ import (
|
||||
|
||||
// CheckScrollProverVersion checks the "scroll-prover" version; if it differs from the local one, it returns false
|
||||
func CheckScrollProverVersion(proverVersion string) bool {
|
||||
if strings.HasPrefix(proverVersion, "sdk") {
|
||||
return CheckProverSDKVersion(proverVersion)
|
||||
}
|
||||
|
||||
// note the version is in fact in the format of "tag-commit-scroll_prover-halo2",
|
||||
// so split-by-'-' length should be 4
|
||||
remote := strings.Split(proverVersion, "-")
|
||||
@@ -23,8 +27,18 @@ func CheckScrollProverVersion(proverVersion string) bool {
|
||||
return remote[2] == local[2]
|
||||
}
|
||||
|
||||
// CheckProverSDKVersion checks the prover SDK version; it simply returns true for now,
|
||||
// and more checks will be added as we evolve.
|
||||
func CheckProverSDKVersion(proverVersion string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// CheckScrollRepoVersion checks if the proverVersion is at least the minimum required version.
|
||||
func CheckScrollRepoVersion(proverVersion, minVersion string) bool {
|
||||
if strings.HasPrefix(proverVersion, "sdk") {
|
||||
return CheckProverSDKWithMinVersion(proverVersion, minVersion)
|
||||
}
|
||||
|
||||
c, err := semver.NewConstraint(">= " + minVersion + "-0")
|
||||
if err != nil {
|
||||
log.Error("failed to initialize constraint", "minVersion", minVersion, "error", err)
|
||||
@@ -39,3 +53,9 @@ func CheckScrollRepoVersion(proverVersion, minVersion string) bool {
|
||||
|
||||
return c.Check(v)
|
||||
}
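The trailing "-0" appended to the constraint above is deliberate: without a pre-release component in the constraint, semver would silently reject pre-release prover versions. A small sketch assuming the Masterminds semver/v3 package (the exact semver dependency is an assumption):

package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// The "-0" suffix lets pre-release versions such as v4.4.89-rc1 satisfy the check.
	c, err := semver.NewConstraint(">= v4.4.45-0")
	if err != nil {
		panic(err)
	}
	v, err := semver.NewVersion("v4.4.89-rc1")
	if err != nil {
		panic(err)
	}
	fmt.Println(c.Check(v)) // true
}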
|
||||
|
||||
// CheckProverSDKWithMinVersion checks that the prover SDK version is at least the minimum required version; it simply returns true for now,
|
||||
// and more checks will be added as we evolve.
|
||||
func CheckProverSDKWithMinVersion(proverVersion string, minVersion string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "v4.4.46"
|
||||
var tag = "v4.5.24"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
|
||||
@@ -88,13 +88,17 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
|
||||
}
|
||||
// Reset prover manager config for manager test cases.
|
||||
cfg.ProverManager = &coordinatorConfig.ProverManager{
|
||||
ProversPerSession: 1,
|
||||
Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
|
||||
ProversPerSession: 1,
|
||||
Verifier: &coordinatorConfig.VerifierConfig{
|
||||
HighVersionCircuit: &coordinatorConfig.CircuitConfig{
|
||||
AssetsPath: "",
|
||||
ForkName: "euclidV2",
|
||||
MinProverVersion: "v4.4.89",
|
||||
},
|
||||
},
|
||||
BatchCollectionTimeSec: 60,
|
||||
ChunkCollectionTimeSec: 60,
|
||||
SessionAttempts: 10,
|
||||
MaxVerifierWorkers: 4,
|
||||
MinProverVersion: "v1.0.0",
|
||||
}
|
||||
endpoint, err := c.testApps.GetDBEndPoint()
|
||||
if err != nil {
|
||||
|
||||
@@ -62,9 +62,9 @@ func action(ctx *cli.Context) error {
|
||||
return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID)
|
||||
}
|
||||
|
||||
var batchProofs []*message.BatchProof
|
||||
var batchProofs []*message.OpenVMBatchProof
|
||||
for _, batch := range batches {
|
||||
var proof message.BatchProof
|
||||
var proof message.OpenVMBatchProof
|
||||
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
|
||||
log.Error("failed to unmarshal batch proof")
|
||||
return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash)
|
||||
|
||||
@@ -2,18 +2,17 @@
|
||||
"prover_manager": {
|
||||
"provers_per_session": 1,
|
||||
"session_attempts": 5,
|
||||
"external_prover_threshold": 32,
|
||||
"bundle_collection_time_sec": 180,
|
||||
"batch_collection_time_sec": 180,
|
||||
"chunk_collection_time_sec": 180,
|
||||
"verifier": {
|
||||
"fork_name": "bernoulli",
|
||||
"mock_mode": true,
|
||||
"params_path": "",
|
||||
"assets_path_lo": "",
|
||||
"assets_path_hi": ""
|
||||
},
|
||||
"max_verifier_workers": 4,
|
||||
"min_prover_version": "v1.0.0"
|
||||
"high_version_circuit": {
|
||||
"assets_path": "assets",
|
||||
"fork_name": "euclidV2",
|
||||
"min_prover_version": "v4.4.45"
|
||||
}
|
||||
}
|
||||
},
|
||||
"db": {
|
||||
"driver_name": "postgres",
|
||||
|
||||
@@ -1,15 +1,18 @@
|
||||
module scroll-tech/coordinator
|
||||
|
||||
go 1.21
|
||||
go 1.22
|
||||
|
||||
require (
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/go-resty/resty/v2 v2.7.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
|
||||
github.com/prometheus/client_golang v1.19.0
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
|
||||
github.com/shopspring/decimal v1.3.1
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
golang.org/x/arch v0.5.0 // indirect
|
||||
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
|
||||
@@ -37,32 +40,27 @@ require (
|
||||
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
golang.org/x/net v0.21.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/net v0.23.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/prometheus/client_golang v1.19.0
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.13.0 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.20.0 // indirect
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
github.com/chenzhuoyu/iasm v0.9.0 // indirect
|
||||
github.com/consensys/bavard v0.1.13 // indirect
|
||||
github.com/consensys/gnark-crypto v0.12.1 // indirect
|
||||
github.com/consensys/bavard v0.1.29 // indirect
|
||||
github.com/consensys/gnark-crypto v0.16.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/holiman/uint256 v1.2.4 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.16 // indirect
|
||||
github.com/holiman/uint256 v1.3.2 // indirect
|
||||
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/mmcloughlin/addchain v0.4.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
@@ -71,14 +69,14 @@ require (
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.8.4 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/supranational/blst v0.3.12 // indirect
|
||||
github.com/supranational/blst v0.3.13 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.14 // indirect
|
||||
github.com/tklauser/numcpus v0.8.0 // indirect
|
||||
github.com/tklauser/numcpus v0.9.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/crypto v0.32.0 // indirect
|
||||
golang.org/x/sync v0.11.0 // indirect
|
||||
golang.org/x/sys v0.30.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
rsc.io/tmplfunc v0.0.3 // indirect
|
||||
)
|
||||
|
||||
@@ -1,14 +1,16 @@
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1 h1:l29et8iLW6omcHltsOP6LLk4s3v4g2FbFs0koxGWVZs=
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1/go.mod h1:jwcPZJ92uoC9nOUTOKWoN/f6JZOgMSKlFSHw5/FrRUk=
|
||||
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
|
||||
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
|
||||
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
|
||||
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
@@ -30,21 +32,21 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
|
||||
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
|
||||
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
|
||||
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
|
||||
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
|
||||
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
|
||||
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
|
||||
github.com/consensys/bavard v0.1.29 h1:fobxIYksIQ+ZSrTJUuQgu+HIJwclrAPcdXqd7H2hh1k=
|
||||
github.com/consensys/bavard v0.1.29/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
|
||||
github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
|
||||
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
||||
@@ -93,11 +95,11 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
|
||||
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk=
|
||||
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
|
||||
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
@@ -107,6 +109,8 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
@@ -120,8 +124,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
|
||||
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
@@ -169,14 +173,14 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA=
|
||||
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
@@ -194,10 +198,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8=
|
||||
github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
|
||||
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
|
||||
@@ -208,8 +212,8 @@ github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
|
||||
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
|
||||
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
|
||||
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
|
||||
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
|
||||
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
|
||||
@@ -232,8 +236,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
@@ -243,13 +247,13 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
|
||||
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
|
||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -265,8 +269,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
@@ -275,8 +279,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
|
||||
@@ -16,6 +16,8 @@ type ProverManager struct {
|
||||
// Number of attempts that a session can be retried if previous attempts failed.
|
||||
// Currently we only consider proving timeout as failure here.
|
||||
SessionAttempts uint8 `json:"session_attempts"`
|
||||
// Threshold for activating the external prover based on unassigned task count.
|
||||
ExternalProverThreshold int64 `json:"external_prover_threshold"`
|
||||
// Zk verifier config.
|
||||
Verifier *VerifierConfig `json:"verifier"`
|
||||
// BatchCollectionTimeSec batch Proof collection time (in seconds).
|
||||
@@ -24,10 +26,6 @@ type ProverManager struct {
|
||||
ChunkCollectionTimeSec int `json:"chunk_collection_time_sec"`
|
||||
// BundleCollectionTimeSec bundle Proof collection time (in seconds).
|
||||
BundleCollectionTimeSec int `json:"bundle_collection_time_sec"`
|
||||
// Max number of workers in verifier worker pool
|
||||
MaxVerifierWorkers int `json:"max_verifier_workers"`
|
||||
// MinProverVersion is the minimum version of the prover that is required.
|
||||
MinProverVersion string `json:"min_prover_version"`
|
||||
}
|
||||
|
||||
// L2 loads l2geth configuration items.
|
||||
@@ -51,13 +49,16 @@ type Config struct {
|
||||
Auth *Auth `json:"auth"`
|
||||
}
|
||||
|
||||
// CircuitConfig circuit items.
|
||||
type CircuitConfig struct {
|
||||
AssetsPath string `json:"assets_path"`
|
||||
ForkName string `json:"fork_name"`
|
||||
MinProverVersion string `json:"min_prover_version"`
|
||||
}
|
||||
|
||||
// VerifierConfig load zk verifier config.
|
||||
type VerifierConfig struct {
|
||||
ForkName string `json:"fork_name"`
|
||||
MockMode bool `json:"mock_mode"`
|
||||
ParamsPath string `json:"params_path"`
|
||||
AssetsPathLo string `json:"assets_path_lo"` // lower version Verifier
|
||||
AssetsPathHi string `json:"assets_path_hi"` // higher version Verifier
|
||||
HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
|
||||
}
|
||||
|
||||
// NewConfig returns a new instance of Config.
|
||||
|
||||
@@ -15,15 +15,18 @@ func TestConfig(t *testing.T) {
|
||||
"prover_manager": {
|
||||
"provers_per_session": 1,
|
||||
"session_attempts": 5,
|
||||
"external_prover_threshold": 32,
|
||||
"bundle_collection_time_sec": 180,
|
||||
"batch_collection_time_sec": 180,
|
||||
"chunk_collection_time_sec": 180,
|
||||
"verifier": {
|
||||
"mock_mode": true,
|
||||
"params_path": "",
|
||||
"agg_vk_path": ""
|
||||
"high_version_circuit": {
|
||||
"assets_path": "assets",
|
||||
"fork_name": "euclidV2",
|
||||
"min_prover_version": "v4.4.45"
|
||||
}
|
||||
},
|
||||
"max_verifier_workers": 4,
|
||||
"min_prover_version": "v1.0.0"
|
||||
"max_verifier_workers": 4
|
||||
},
|
||||
"db": {
|
||||
"driver_name": "postgres",
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
|
||||
jwt "github.com/appleboy/gin-jwt/v2"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
@@ -45,35 +44,37 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
|
||||
return "", fmt.Errorf("check the login parameter failure: %w", err)
|
||||
}
|
||||
|
||||
hardForkNames, err := a.loginLogic.ProverHardForkName(&login)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("prover hard fork name failure:%w", err)
|
||||
}
|
||||
|
||||
// check whether the challenge has been used; if so, return failure
|
||||
if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
|
||||
return "", fmt.Errorf("login insert challenge string failure:%w", err)
|
||||
}
|
||||
return login, nil
|
||||
|
||||
returnData := types.LoginParameterWithHardForkName{
|
||||
HardForkName: hardForkNames,
|
||||
LoginParameter: login,
|
||||
}
|
||||
|
||||
return returnData, nil
|
||||
}
|
||||
|
||||
// PayloadFunc returns jwt.MapClaims with {public key, prover name}.
|
||||
func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
|
||||
v, ok := data.(types.LoginParameter)
|
||||
v, ok := data.(types.LoginParameterWithHardForkName)
|
||||
if !ok {
|
||||
return jwt.MapClaims{}
|
||||
}
|
||||
|
||||
publicKey := v.PublicKey
|
||||
if publicKey == "" {
|
||||
var err error
|
||||
publicKey, err = v.RecoverPublicKeyFromSignature()
|
||||
if err != nil {
|
||||
// do not handle the error here since v.Verify() was already called beforehand, so there should be no error
|
||||
// add log just in case some error happens
|
||||
log.Error("impossible path: failed to recover public key from signature", "error", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
return jwt.MapClaims{
|
||||
types.PublicKey: publicKey,
|
||||
types.ProverName: v.Message.ProverName,
|
||||
types.ProverVersion: v.Message.ProverVersion,
|
||||
types.HardForkName: v.HardForkName,
|
||||
types.PublicKey: v.PublicKey,
|
||||
types.ProverName: v.Message.ProverName,
|
||||
types.ProverVersion: v.Message.ProverVersion,
|
||||
types.ProverProviderTypeKey: v.Message.ProverProviderType,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -91,5 +92,14 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
|
||||
if proverVersion, ok := claims[types.ProverVersion]; ok {
|
||||
c.Set(types.ProverVersion, proverVersion)
|
||||
}
|
||||
|
||||
if hardForkName, ok := claims[types.HardForkName]; ok {
|
||||
c.Set(types.HardForkName, hardForkName)
|
||||
}
|
||||
|
||||
if providerType, ok := claims[types.ProverProviderTypeKey]; ok {
|
||||
c.Set(types.ProverProviderTypeKey, providerType)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
|
||||
panic("proof receiver new verifier failure")
|
||||
}
|
||||
|
||||
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)
|
||||
log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)
|
||||
|
||||
Auth = NewAuthController(db, cfg, vf)
|
||||
GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
|
||||
|
||||
@@ -108,10 +108,6 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
|
||||
|
||||
func (ptc *GetTaskController) proofType(para *coordinatorType.GetTaskParameter) message.ProofType {
|
||||
var proofTypes []message.ProofType
|
||||
if para.TaskType != 0 {
|
||||
proofTypes = append(proofTypes, message.ProofType(para.TaskType))
|
||||
}
|
||||
|
||||
for _, proofType := range para.TaskTypes {
|
||||
proofTypes = append(proofTypes, message.ProofType(proofType))
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package auth
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
@@ -20,34 +21,26 @@ import (
|
||||
type LoginLogic struct {
|
||||
cfg *config.Config
|
||||
challengeOrm *orm.Challenge
|
||||
chunkVks map[string]struct{}
|
||||
batchVKs map[string]struct{}
|
||||
bundleVks map[string]struct{}
|
||||
|
||||
openVmVks map[string]struct{}
|
||||
|
||||
proverVersionHardForkMap map[string][]string
|
||||
}
|
||||
|
||||
// NewLoginLogic new a LoginLogic
|
||||
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
|
||||
l := &LoginLogic{
|
||||
cfg: cfg,
|
||||
chunkVks: make(map[string]struct{}),
|
||||
batchVKs: make(map[string]struct{}),
|
||||
bundleVks: make(map[string]struct{}),
|
||||
challengeOrm: orm.NewChallenge(db),
|
||||
}
|
||||
proverVersionHardForkMap := make(map[string][]string)
|
||||
|
||||
for _, vk := range vf.ChunkVKMap {
|
||||
l.chunkVks[vk] = struct{}{}
|
||||
}
|
||||
var highHardForks []string
|
||||
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
|
||||
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
|
||||
|
||||
for _, vk := range vf.BatchVKMap {
|
||||
l.batchVKs[vk] = struct{}{}
|
||||
return &LoginLogic{
|
||||
cfg: cfg,
|
||||
openVmVks: vf.OpenVMVkMap,
|
||||
challengeOrm: orm.NewChallenge(db),
|
||||
proverVersionHardForkMap: proverVersionHardForkMap,
|
||||
}
|
||||
|
||||
for _, vk := range vf.BundleVkMap {
|
||||
l.bundleVks[vk] = struct{}{}
|
||||
}
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
// InsertChallengeString inserts the challenge string and checks whether it already exists
|
||||
@@ -56,52 +49,59 @@ func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) e
|
||||
}
|
||||
|
||||
func (l *LoginLogic) Check(login *types.LoginParameter) error {
|
||||
if login.PublicKey != "" {
|
||||
verify, err := login.Verify()
|
||||
if err != nil || !verify {
|
||||
log.Error("auth message verify failure", "prover_name", login.Message.ProverName,
|
||||
verify, err := login.Verify()
|
||||
if err != nil || !verify {
|
||||
log.Error("auth message verify failure", "prover_name", login.Message.ProverName,
|
||||
"prover_version", login.Message.ProverVersion, "message", login.Message)
|
||||
return errors.New("auth message verify failure")
|
||||
}
|
||||
|
||||
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
|
||||
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion, login.Message.ProverVersion)
|
||||
}
|
||||
|
||||
vks := make(map[string]struct{})
|
||||
for vk := range l.openVmVks {
|
||||
vks[vk] = struct{}{}
|
||||
}
|
||||
|
||||
for _, vk := range login.Message.VKs {
|
||||
if _, ok := vks[vk]; !ok {
|
||||
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
|
||||
"prover_version", login.Message.ProverVersion, "message", login.Message)
|
||||
return errors.New("auth message verify failure")
|
||||
}
|
||||
}
|
||||
|
||||
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.MinProverVersion) {
|
||||
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
|
||||
l.cfg.ProverManager.MinProverVersion, login.Message.ProverVersion)
|
||||
}
|
||||
|
||||
if len(login.Message.ProverTypes) > 0 {
|
||||
vks := make(map[string]struct{})
|
||||
for _, proverType := range login.Message.ProverTypes {
|
||||
switch proverType {
|
||||
case types.ProverTypeChunk:
|
||||
for vk := range l.chunkVks {
|
||||
vks[vk] = struct{}{}
|
||||
}
|
||||
case types.ProverTypeBatch:
|
||||
for vk := range l.batchVKs {
|
||||
vks[vk] = struct{}{}
|
||||
}
|
||||
for vk := range l.bundleVks {
|
||||
vks[vk] = struct{}{}
|
||||
}
|
||||
default:
|
||||
log.Error("invalid prover_type", "value", proverType, "prover name", login.Message.ProverName, "prover_version", login.Message.ProverVersion)
|
||||
}
|
||||
}
|
||||
|
||||
for _, vk := range login.Message.VKs {
|
||||
if _, ok := vks[vk]; !ok {
|
||||
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
|
||||
"prover_version", login.Message.ProverVersion, "message", login.Message)
|
||||
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
|
||||
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
|
||||
version.Version, login.Message.ProverVersion)
|
||||
}
|
||||
// if the prover reports the same prover version
|
||||
return errors.New("incompatible vk. please check your params files or config files")
|
||||
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
|
||||
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
|
||||
version.Version, login.Message.ProverVersion)
|
||||
}
|
||||
// if the prover reports the same prover version
|
||||
return errors.New("incompatible vk. please check your params files or config files")
|
||||
}
|
||||
}
|
||||
|
||||
if login.Message.ProverProviderType != types.ProverProviderTypeInternal && login.Message.ProverProviderType != types.ProverProviderTypeExternal {
|
||||
// for backward compatibility, set ProverProviderType as internal
|
||||
if login.Message.ProverProviderType == types.ProverProviderTypeUndefined {
|
||||
login.Message.ProverProviderType = types.ProverProviderTypeInternal
|
||||
} else {
|
||||
log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
|
||||
return errors.New("invalid prover provider type.")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProverHardForkName retrieves the hard fork name that the prover belongs to
|
||||
func (l *LoginLogic) ProverHardForkName(login *types.LoginParameter) (string, error) {
|
||||
proverVersionSplits := strings.Split(login.Message.ProverVersion, "-")
|
||||
if len(proverVersionSplits) == 0 {
|
||||
return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
|
||||
}
|
||||
|
||||
proverVersion := proverVersionSplits[0]
|
||||
if hardForkNames, ok := l.proverVersionHardForkMap[proverVersion]; ok {
|
||||
return strings.Join(hardForkNames, ","), nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
|
||||
}
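A short fragment illustrating the lookup above with hypothetical values (it assumes fmt and strings, both already imported by this file): only the leading tag of the reported prover version is used as the map key, and it has to match a configured MinProverVersion exactly.

	// Hypothetical map mirroring what NewLoginLogic builds from the verifier config.
	proverVersionHardForkMap := map[string][]string{
		"v4.4.89": {"euclidV2"},
	}
	tag := strings.Split("v4.4.89-abc1234-openvm", "-")[0]          // "v4.4.89"
	fmt.Println(strings.Join(proverVersionHardForkMap[tag], ",")) // euclidV2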
|
||||
|
||||
@@ -4,18 +4,19 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv3"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/forks"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
@@ -23,6 +24,7 @@ import (
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
cutils "scroll-tech/coordinator/internal/utils"
|
||||
)
|
||||
|
||||
// BatchProverTask is the prover task implementation for batch proofs
|
||||
@@ -64,7 +66,20 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
|
||||
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
|
||||
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
|
||||
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
|
||||
unassignedBatchCount, getCountError := bp.batchOrm.GetUnassignedBatchCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
|
||||
if getCountError != nil {
|
||||
log.Error("failed to get unassigned batch proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
// Only assign the task to an external prover if the number of unassigned tasks exceeds the threshold
|
||||
if unassignedBatchCount < bp.cfg.ProverManager.ExternalProverThreshold {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
var batchTask *orm.Batch
|
||||
var hardForkName string
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
var tmpBatchTask *orm.Batch
|
||||
@@ -89,6 +104,30 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
taskCtx.taskType = message.ProofTypeBatch
|
||||
taskCtx.batchTask = tmpBatchTask
|
||||
|
||||
var checkErr error
|
||||
hardForkName, checkErr = bp.hardForkSanityCheck(ctx, taskCtx)
|
||||
if checkErr != nil {
|
||||
log.Debug("hard fork sanity check failed", "height", getTaskParameter.ProverHeight, "err", checkErr)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Don't dispatch the same failing job to the same prover
|
||||
proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
|
||||
if getFailedTaskError != nil {
|
||||
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
for i := 0; i < len(proverTasks); i++ {
|
||||
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
|
||||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
|
||||
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
@@ -110,14 +149,6 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
}
|
||||
|
||||
log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
|
||||
|
||||
hardForkName, getHardForkErr := bp.hardForkName(ctx, batchTask)
|
||||
if getHardForkErr != nil {
|
||||
bp.recoverActiveAttempts(ctx, batchTask)
|
||||
log.Error("retrieve hard fork name by batch failed", "task_id", batchTask.Hash, "err", getHardForkErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
proverTask := orm.ProverTask{
|
||||
TaskID: batchTask.Hash,
|
||||
ProverPublicKey: taskCtx.PublicKey,
|
||||
@@ -154,20 +185,6 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return taskMsg, nil
|
||||
}
|
||||
|
||||
func (bp *BatchProverTask) hardForkName(ctx *gin.Context, batchTask *orm.Batch) (string, error) {
|
||||
startChunk, getChunkErr := bp.chunkOrm.GetChunkByHash(ctx, batchTask.StartChunkHash)
|
||||
if getChunkErr != nil {
|
||||
return "", getChunkErr
|
||||
}
|
||||
|
||||
l2Block, getBlockErr := bp.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
|
||||
if getBlockErr != nil {
|
||||
return "", getBlockErr
|
||||
}
|
||||
hardForkName := forks.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
return hardForkName, nil
|
||||
}
|
||||
|
||||
func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, batch *orm.Batch, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
|
||||
// get chunk from db
|
||||
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID)
|
||||
@@ -180,40 +197,34 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
|
||||
return nil, fmt.Errorf("no chunk found for batch task id:%s", task.TaskID)
|
||||
}
|
||||
|
||||
var chunkProofs []*message.ChunkProof
|
||||
var chunkProofs []*message.OpenVMChunkProof
|
||||
var chunkInfos []*message.ChunkInfo
|
||||
for _, chunk := range chunks {
|
||||
var proof message.ChunkProof
|
||||
var proof message.OpenVMChunkProof
|
||||
if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
|
||||
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
|
||||
}
|
||||
chunkProofs = append(chunkProofs, &proof)
|
||||
|
||||
chunkInfo := message.ChunkInfo{
|
||||
ChainID: bp.cfg.L2.ChainID,
|
||||
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
|
||||
PostStateRoot: common.HexToHash(chunk.StateRoot),
|
||||
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
|
||||
DataHash: common.HexToHash(chunk.Hash),
|
||||
IsPadding: false,
|
||||
}
|
||||
if proof.ChunkInfo != nil {
|
||||
chunkInfo.TxBytes = proof.ChunkInfo.TxBytes
|
||||
ChainID: bp.cfg.L2.ChainID,
|
||||
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
|
||||
PostStateRoot: common.HexToHash(chunk.StateRoot),
|
||||
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
|
||||
DataHash: common.HexToHash(chunk.Hash),
|
||||
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
|
||||
PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
|
||||
IsPadding: false,
|
||||
InitialBlockNumber: proof.MetaData.ChunkInfo.InitialBlockNumber,
|
||||
BlockCtxs: proof.MetaData.ChunkInfo.BlockCtxs,
|
||||
TxDataLength: proof.MetaData.ChunkInfo.TxDataLength,
|
||||
}
|
||||
chunkInfos = append(chunkInfos, &chunkInfo)
|
||||
}
|
||||
|
||||
taskDetail := message.BatchTaskDetail{
|
||||
ChunkInfos: chunkInfos,
|
||||
ChunkProofs: chunkProofs,
|
||||
}
|
||||
|
||||
if hardForkName == "darwin" {
|
||||
batchHeader, decodeErr := codecv3.NewDABatchFromBytes(batch.BatchHeader)
|
||||
if decodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to decode batch header, taskID:%s err:%w", task.TaskID, decodeErr)
|
||||
}
|
||||
taskDetail.BatchHeader = batchHeader
|
||||
taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs, hardForkName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", task.TaskID, err)
|
||||
}
|
||||
|
||||
chunkProofsBytes, err := json.Marshal(taskDetail)
|
||||
@@ -228,11 +239,56 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
|
||||
TaskData: string(chunkProofsBytes),
|
||||
HardForkName: hardForkName,
|
||||
}
|
||||
|
||||
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeBatch.String(), "hard_fork_name", hardForkName, "task_data", taskMsg.TaskData)
|
||||
|
||||
return taskMsg, nil
|
||||
}
|
||||
|
||||
func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *orm.Batch) {
|
||||
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
|
||||
if err := bp.batchOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
|
||||
log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
|
||||
taskDetail := &message.BatchTaskDetail{
|
||||
ChunkInfos: chunkInfos,
|
||||
ChunkProofs: chunkProofs,
|
||||
}
|
||||
|
||||
if hardForkName == message.EuclidV2Fork {
|
||||
taskDetail.ForkName = message.EuclidV2ForkNameForProver
|
||||
} else {
|
||||
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
|
||||
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
|
||||
}
|
||||
|
||||
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
|
||||
switch dbBatchCodecVersion {
|
||||
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7:
|
||||
default:
|
||||
return taskDetail, nil
|
||||
}
|
||||
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
|
||||
}
|
||||
|
||||
batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
|
||||
if decodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
|
||||
}
|
||||
taskDetail.BatchHeader = batchHeader
|
||||
taskDetail.BlobBytes = dbBatch.BlobBytes
|
||||
taskDetail.ChallengeDigest = common.HexToHash(dbBatch.ChallengeDigest)
|
||||
// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
|
||||
// | z | y | kzg_commitment | kzg_proof |
|
||||
// |---------|---------|----------------|-----------|
|
||||
// | bytes32 | bytes32 | bytes48 | bytes48 |
|
||||
taskDetail.KzgProof = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
|
||||
taskDetail.KzgCommitment = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
|
||||
|
||||
return taskDetail, nil
|
||||
}
|
||||
|
||||
@@ -9,18 +9,19 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/forks"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
cutils "scroll-tech/coordinator/internal/utils"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
// BundleProverTask is the prover task implementation for bundle proofs
|
||||
@@ -63,7 +64,20 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
|
||||
|
||||
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
|
||||
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
|
||||
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
|
||||
unassignedBundleCount, getCountError := bp.bundleOrm.GetUnassignedBundleCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
|
||||
if getCountError != nil {
|
||||
log.Error("failed to get unassigned bundle proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
// Assign external prover if unassigned task number exceeds threshold
|
||||
if unassignedBundleCount < bp.cfg.ProverManager.ExternalProverThreshold {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
var bundleTask *orm.Bundle
|
||||
var hardForkName string
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
var tmpBundleTask *orm.Bundle
|
||||
@@ -88,6 +102,30 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
taskCtx.taskType = message.ProofTypeBundle
|
||||
taskCtx.bundleTask = tmpBundleTask
|
||||
|
||||
var checkErr error
|
||||
hardForkName, checkErr = bp.hardForkSanityCheck(ctx, taskCtx)
|
||||
if checkErr != nil {
|
||||
log.Debug("hard fork sanity check failed", "height", getTaskParameter.ProverHeight, "err", checkErr)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Don't dispatch the same failing job to the same prover
|
||||
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
for i := 0; i < len(proverTasks); i++ {
|
||||
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
|
||||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
|
||||
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBundleTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
@@ -109,14 +147,6 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
|
||||
}
|
||||
|
||||
log.Info("start bundle proof generation session", "task index", bundleTask.Index, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
|
||||
|
||||
hardForkName, getHardForkErr := bp.hardForkName(ctx, bundleTask)
|
||||
if getHardForkErr != nil {
|
||||
bp.recoverActiveAttempts(ctx, bundleTask)
|
||||
log.Error("retrieve hard fork name by bundle failed", "task_id", bundleTask.Hash, "err", getHardForkErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
proverTask := orm.ProverTask{
|
||||
TaskID: bundleTask.Hash,
|
||||
ProverPublicKey: taskCtx.PublicKey,
|
||||
@@ -153,26 +183,6 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
|
||||
return taskMsg, nil
|
||||
}
|
||||
|
||||
func (bp *BundleProverTask) hardForkName(ctx *gin.Context, bundleTask *orm.Bundle) (string, error) {
|
||||
startBatch, getBatchErr := bp.batchOrm.GetBatchByHash(ctx, bundleTask.StartBatchHash)
|
||||
if getBatchErr != nil {
|
||||
return "", getBatchErr
|
||||
}
|
||||
|
||||
startChunk, getChunkErr := bp.chunkOrm.GetChunkByHash(ctx, startBatch.StartChunkHash)
|
||||
if getChunkErr != nil {
|
||||
return "", getChunkErr
|
||||
}
|
||||
|
||||
l2Block, getBlockErr := bp.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
|
||||
if getBlockErr != nil {
|
||||
return "", getBlockErr
|
||||
}
|
||||
|
||||
hardForkName := forks.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
return hardForkName, nil
|
||||
}
|
||||
|
||||
func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
|
||||
// get bundle from db
|
||||
batches, err := bp.batchOrm.GetBatchesByBundleHash(ctx, task.TaskID)
|
||||
@@ -185,9 +195,14 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
|
||||
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
|
||||
}
|
||||
|
||||
var batchProofs []*message.BatchProof
|
||||
parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
|
||||
}
|
||||
|
||||
var batchProofs []*message.OpenVMBatchProof
|
||||
for _, batch := range batches {
|
||||
var proof message.BatchProof
|
||||
var proof message.OpenVMBatchProof
|
||||
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, task.TaskID, batch.Hash)
|
||||
}
|
||||
@@ -198,6 +213,24 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
|
||||
BatchProofs: batchProofs,
|
||||
}
|
||||
|
||||
if hardForkName == message.EuclidV2Fork {
|
||||
taskDetail.ForkName = message.EuclidV2ForkNameForProver
|
||||
} else {
|
||||
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
|
||||
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
|
||||
}
|
||||
|
||||
taskDetail.BundleInfo = &message.OpenVMBundleInfo{
|
||||
ChainID: bp.cfg.L2.ChainID,
|
||||
PrevStateRoot: common.HexToHash(parentBatch.StateRoot),
|
||||
PostStateRoot: common.HexToHash(batches[len(batches)-1].StateRoot),
|
||||
WithdrawRoot: common.HexToHash(batches[len(batches)-1].WithdrawRoot),
|
||||
NumBatches: uint32(len(batches)),
|
||||
PrevBatchHash: common.HexToHash(batches[0].ParentBatchHash),
|
||||
BatchHash: common.HexToHash(batches[len(batches)-1].Hash),
|
||||
MsgQueueHash: common.HexToHash(batches[len(batches)-1].PostL1MessageQueueHash),
|
||||
}
|
||||
|
||||
batchProofsBytes, err := json.Marshal(taskDetail)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", task.TaskID, err)
|
||||
@@ -210,6 +243,9 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
|
||||
TaskData: string(batchProofsBytes),
|
||||
HardForkName: hardForkName,
|
||||
}
|
||||
|
||||
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeBundle.String(), "hard_fork_name", hardForkName, "task_data", taskMsg.TaskData)
|
||||
|
||||
return taskMsg, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -9,11 +9,11 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/forks"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
cutils "scroll-tech/coordinator/internal/utils"
|
||||
)
|
||||
|
||||
// ChunkProverTask is the prover task implementation for chunk proofs
|
||||
@@ -61,11 +62,24 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
|
||||
maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
|
||||
maxTotalAttempts := cp.cfg.ProverManager.SessionAttempts
|
||||
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
|
||||
unassignedChunkCount, getCountError := cp.chunkOrm.GetUnassignedChunkCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
|
||||
if getCountError != nil {
|
||||
log.Error("failed to get unassigned chunk proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
// Assign external prover if unassigned task number exceeds threshold
|
||||
if unassignedChunkCount < cp.cfg.ProverManager.ExternalProverThreshold {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
var chunkTask *orm.Chunk
|
||||
var hardForkName string
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
var tmpChunkTask *orm.Chunk
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
@@ -74,7 +88,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
// Why query again here? To allow a task to be assigned to multiple provers, chunks in `ProvingTaskAssigned` status must also be
// assignable. A single query with `proving_status in (1, 2)` would not use the postgres index, so the SQL is split into two queries.
|
||||
if tmpChunkTask == nil {
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
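The comment at the top of this hunk explains the two-step lookup: a combined `proving_status in (1, 2)` filter would defeat the postgres index, so assigned and unassigned chunks are fetched with two narrow queries. A compact sketch of that access pattern (the wrapper function is illustrative; the ORM calls and their arguments are the ones used above):

// Illustrative sketch: query already-assigned chunks first (multi-prover support), then fall back
// to unassigned chunks, keeping each query narrow enough to use its index.
func pickChunkTask(ctx context.Context, chunkOrm *orm.Chunk, maxActive, maxTotal uint8, height uint64) (*orm.Chunk, error) {
	chunk, err := chunkOrm.GetAssignedChunk(ctx, maxActive, maxTotal, height)
	if err != nil {
		return nil, err
	}
	if chunk == nil {
		// Avoided alternative: one query filtering proving_status in (1, 2).
		chunk, err = chunkOrm.GetUnassignedChunk(ctx, maxActive, maxTotal, height)
	}
	return chunk, err
}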
|
||||
@@ -86,6 +100,30 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
taskCtx.taskType = message.ProofTypeChunk
|
||||
taskCtx.chunkTask = tmpChunkTask
|
||||
|
||||
var checkErr error
|
||||
hardForkName, checkErr = cp.hardForkSanityCheck(ctx, taskCtx)
|
||||
if checkErr != nil {
|
||||
log.Debug("hard fork sanity check failed", "height", getTaskParameter.ProverHeight, "err", checkErr)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Don't dispatch the same failing job to the same prover
|
||||
proverTasks, getFailedTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
|
||||
if getFailedTaskError != nil {
|
||||
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getFailedTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
for i := 0; i < len(proverTasks); i++ {
|
||||
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
|
||||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
|
||||
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpChunkTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
@@ -107,14 +145,6 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
}
|
||||
|
||||
log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
|
||||
|
||||
hardForkName, getHardForkErr := cp.hardForkName(ctx, chunkTask)
|
||||
if getHardForkErr != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("retrieve hard fork name by chunk failed", "task_id", chunkTask.Hash, "err", getHardForkErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
proverTask := orm.ProverTask{
|
||||
TaskID: chunkTask.Hash,
|
||||
ProverPublicKey: taskCtx.PublicKey,
|
||||
@@ -133,7 +163,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
|
||||
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, chunkTask, hardForkName)
|
||||
if err != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
|
||||
@@ -150,26 +180,28 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return taskMsg, nil
|
||||
}
|
||||
|
||||
func (cp *ChunkProverTask) hardForkName(ctx *gin.Context, chunkTask *orm.Chunk) (string, error) {
|
||||
l2Block, getBlockErr := cp.blockOrm.GetL2BlockByNumber(ctx.Copy(), chunkTask.StartBlockNumber)
|
||||
if getBlockErr != nil {
|
||||
return "", getBlockErr
|
||||
}
|
||||
hardForkName := forks.GetHardforkName(cp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
return hardForkName, nil
|
||||
}
|
||||
|
||||
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
|
||||
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, chunk *orm.Chunk, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
|
||||
// Get block hashes.
|
||||
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
|
||||
if dbErr != nil || len(blockHashes) == 0 {
|
||||
return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%w", task.TaskID, dbErr)
|
||||
}
|
||||
|
||||
var taskDetailBytes []byte
|
||||
taskDetail := message.ChunkTaskDetail{
|
||||
BlockHashes: blockHashes,
|
||||
BlockHashes: blockHashes,
|
||||
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
|
||||
}
|
||||
blockHashesBytes, err := json.Marshal(taskDetail)
|
||||
|
||||
if hardForkName == message.EuclidV2Fork {
|
||||
taskDetail.ForkName = message.EuclidV2ForkNameForProver
|
||||
} else {
|
||||
log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
|
||||
return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
|
||||
}
|
||||
|
||||
var err error
|
||||
taskDetailBytes, err = json.Marshal(taskDetail)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", task.TaskID, err)
|
||||
}
|
||||
@@ -178,10 +210,12 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
|
||||
UUID: task.UUID.String(),
|
||||
TaskID: task.TaskID,
|
||||
TaskType: int(message.ProofTypeChunk),
|
||||
TaskData: string(blockHashesBytes),
|
||||
TaskData: string(taskDetailBytes),
|
||||
HardForkName: hardForkName,
|
||||
}
|
||||
|
||||
log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeChunk.String(), "hard_fork_name", hardForkName, "task_data", proverTaskSchema.TaskData)
|
||||
|
||||
return proverTaskSchema, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -3,14 +3,18 @@ package provertask
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
@@ -46,14 +50,87 @@ type BaseProverTask struct {
|
||||
}
|
||||
|
||||
type proverTaskContext struct {
|
||||
PublicKey string
|
||||
ProverName string
|
||||
ProverVersion string
|
||||
PublicKey string
|
||||
ProverName string
|
||||
ProverVersion string
|
||||
ProverProviderType uint8
|
||||
HardForkNames map[string]struct{}
|
||||
|
||||
taskType message.ProofType
|
||||
chunkTask *orm.Chunk
|
||||
batchTask *orm.Batch
|
||||
bundleTask *orm.Bundle
|
||||
}
|
||||
|
||||
// hardForkName gets the hard fork name for a chunk/batch/bundle task
|
||||
func (b *BaseProverTask) hardForkName(ctx *gin.Context, taskCtx *proverTaskContext) (string, error) {
|
||||
switch {
|
||||
case taskCtx.taskType == message.ProofTypeChunk:
|
||||
if taskCtx.chunkTask == nil {
|
||||
return "", errors.New("chunk task is nil")
|
||||
}
|
||||
l2Block, getBlockErr := b.blockOrm.GetL2BlockByNumber(ctx.Copy(), taskCtx.chunkTask.StartBlockNumber)
|
||||
if getBlockErr != nil {
|
||||
return "", getBlockErr
|
||||
}
|
||||
hardForkName := encoding.GetHardforkName(b.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
return hardForkName, nil
|
||||
|
||||
case taskCtx.taskType == message.ProofTypeBatch:
|
||||
if taskCtx.batchTask == nil {
|
||||
return "", errors.New("batch task is nil")
|
||||
}
|
||||
startChunk, getChunkErr := b.chunkOrm.GetChunkByHash(ctx, taskCtx.batchTask.StartChunkHash)
|
||||
if getChunkErr != nil {
|
||||
return "", getChunkErr
|
||||
}
|
||||
l2Block, getBlockErr := b.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
|
||||
if getBlockErr != nil {
|
||||
return "", getBlockErr
|
||||
}
|
||||
hardForkName := encoding.GetHardforkName(b.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
return hardForkName, nil
|
||||
|
||||
case taskCtx.taskType == message.ProofTypeBundle:
|
||||
if taskCtx.bundleTask == nil {
|
||||
return "", errors.New("bundle task is nil")
|
||||
}
|
||||
startBatch, getBatchErr := b.batchOrm.GetBatchByHash(ctx, taskCtx.bundleTask.StartBatchHash)
|
||||
if getBatchErr != nil {
|
||||
return "", getBatchErr
|
||||
}
|
||||
startChunk, getChunkErr := b.chunkOrm.GetChunkByHash(ctx, startBatch.StartChunkHash)
|
||||
if getChunkErr != nil {
|
||||
return "", getChunkErr
|
||||
}
|
||||
l2Block, getBlockErr := b.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
|
||||
if getBlockErr != nil {
|
||||
return "", getBlockErr
|
||||
}
|
||||
hardForkName := encoding.GetHardforkName(b.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
return hardForkName, nil
|
||||
default:
|
||||
return "", errors.New("illegal task type")
|
||||
}
|
||||
}
|
||||
|
||||
// hardForkSanityCheck checks that the task's hard fork name matches one supported by the prover
|
||||
func (b *BaseProverTask) hardForkSanityCheck(ctx *gin.Context, taskCtx *proverTaskContext) (string, error) {
|
||||
hardForkName, getHardForkErr := b.hardForkName(ctx, taskCtx)
|
||||
if getHardForkErr != nil {
|
||||
return "", getHardForkErr
|
||||
}
|
||||
|
||||
if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
|
||||
return "", fmt.Errorf("to be assigned prover task's hard-fork name is not the same as prover, proverName: %s, proverVersion: %s, proverSupportHardForkNames: %s, taskHardForkName: %v", taskCtx.ProverName, taskCtx.ProverVersion, taskCtx.HardForkNames, hardForkName)
|
||||
}
|
||||
return hardForkName, nil
|
||||
}
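For context, the `HardForkNames` set consulted here is built in `checkParameter` below from a comma-separated value on the gin context. A small illustrative sketch of that parsing and the membership test (the literal fork names are example data):

// Illustrative sketch: parse the prover's supported fork names and test the task's fork against them.
supported := make(map[string]struct{})
for _, name := range strings.Split("euclid,euclidV2", ",") { // example value; normally read from the gin context
	supported[name] = struct{}{}
}
_, ok := supported["euclidV2"] // true here, so the assignment passes the sanity check
_ = ok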
|
||||
|
||||
// checkParameter checks whether the prover task parameters are legal
|
||||
func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, error) {
|
||||
var ptc proverTaskContext
|
||||
ptc.HardForkNames = make(map[string]struct{})
|
||||
|
||||
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
|
||||
if !publicKeyExist {
|
||||
@@ -73,6 +150,22 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
|
||||
}
|
||||
ptc.ProverVersion = proverVersion.(string)
|
||||
|
||||
ProverProviderType, ProverProviderTypeExist := ctx.Get(coordinatorType.ProverProviderTypeKey)
|
||||
if !ProverProviderTypeExist {
|
||||
// for backward compatibility, set ProverProviderType as internal
|
||||
ProverProviderType = float64(coordinatorType.ProverProviderTypeInternal)
|
||||
}
|
||||
ptc.ProverProviderType = uint8(ProverProviderType.(float64))
|
||||
|
||||
hardForkNamesStr, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
|
||||
if !hardForkNameExist {
|
||||
return nil, errors.New("get hard fork name from context failed")
|
||||
}
|
||||
hardForkNames := strings.Split(hardForkNamesStr.(string), ",")
|
||||
for _, hardForkName := range hardForkNames {
|
||||
ptc.HardForkNames[hardForkName] = struct{}{}
|
||||
}
|
||||
|
||||
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s, proverVersion: %s", publicKey, err, proverName, proverVersion)
|
||||
|
||||
@@ -10,11 +10,11 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/forks"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
@@ -171,23 +171,23 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
|
||||
|
||||
switch message.ProofType(proofParameter.TaskType) {
|
||||
case message.ProofTypeChunk:
|
||||
var chunkProof message.ChunkProof
|
||||
chunkProof := &message.OpenVMChunkProof{}
|
||||
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &chunkProof); unmarshalErr != nil {
|
||||
return unmarshalErr
|
||||
}
|
||||
success, verifyErr = m.verifier.VerifyChunkProof(&chunkProof, hardForkName)
|
||||
success, verifyErr = m.verifier.VerifyChunkProof(chunkProof, hardForkName)
|
||||
case message.ProofTypeBatch:
|
||||
var batchProof message.BatchProof
|
||||
batchProof := &message.OpenVMBatchProof{}
|
||||
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
|
||||
return unmarshalErr
|
||||
}
|
||||
success, verifyErr = m.verifier.VerifyBatchProof(&batchProof, hardForkName)
|
||||
success, verifyErr = m.verifier.VerifyBatchProof(batchProof, hardForkName)
|
||||
case message.ProofTypeBundle:
|
||||
var bundleProof message.BundleProof
|
||||
bundleProof := &message.OpenVMBundleProof{}
|
||||
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
|
||||
return unmarshalErr
|
||||
}
|
||||
success, verifyErr = m.verifier.VerifyBundleProof(&bundleProof)
|
||||
success, verifyErr = m.verifier.VerifyBundleProof(bundleProof, hardForkName)
|
||||
}
|
||||
|
||||
if verifyErr != nil || !success {
|
||||
@@ -265,7 +265,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
|
||||
proofTime := time.Since(proverTask.CreatedAt)
|
||||
proofTimeSec := uint64(proofTime.Seconds())
|
||||
|
||||
if proofParameter.Status != int(message.StatusOk) {
|
||||
if proofParameter.Status != int(coordinatorType.StatusOk) {
|
||||
// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
|
||||
failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)
|
||||
|
||||
@@ -462,6 +462,6 @@ func (m *ProofReceiverLogic) hardForkName(ctx *gin.Context, hash string, proofTy
|
||||
return "", getBlockErr
|
||||
}
|
||||
|
||||
hardForkName := forks.GetHardforkName(m.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
hardForkName := encoding.GetHardforkName(m.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
|
||||
return hardForkName, nil
|
||||
}
|
||||
|
||||
Binary file not shown.
Binary file not shown.
@@ -1,11 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
work_dir="$(dirname -- "${BASH_SOURCE[0]}")"
|
||||
work_dir="$(cd -- "$work_dir" && pwd)"
|
||||
echo $work_dir
|
||||
|
||||
rm $work_dir/*.vkey
|
||||
|
||||
version=release-v0.11.4
|
||||
wget https://circuit-release.s3.us-west-2.amazonaws.com/${version}/chunk_vk.vkey -O $work_dir/chunk_vk.vkey
|
||||
wget https://circuit-release.s3.us-west-2.amazonaws.com/${version}/agg_vk.vkey -O $work_dir/agg_vk.vkey
|
||||
@@ -10,31 +10,26 @@ import (
|
||||
|
||||
// NewVerifier Sets up a mock verifier.
|
||||
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
|
||||
batchVKMap := map[string]string{cfg.ForkName: "mock_vk"}
|
||||
chunkVKMap := map[string]string{cfg.ForkName: "mock_vk"}
|
||||
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
|
||||
return &Verifier{cfg: cfg, OpenVMVkMap: map[string]struct{}{"mock_vk": {}}}, nil
|
||||
}
|
||||
|
||||
// VerifyChunkProof returns a mock verification result for a ChunkProof.
|
||||
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string) (bool, error) {
|
||||
if string(proof.Proof) == InvalidTestProof {
|
||||
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
|
||||
if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// VerifyBatchProof returns a mock verification result for a BatchProof.
|
||||
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
|
||||
if string(proof.Proof) == InvalidTestProof {
|
||||
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
|
||||
if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// VerifyBundleProof returns a mock verification result for a BundleProof.
|
||||
func (v *Verifier) VerifyBundleProof(proof *message.BundleProof) (bool, error) {
|
||||
if string(proof.Proof) == InvalidTestProof {
|
||||
return false, nil
|
||||
}
|
||||
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
|
||||
return true, nil
|
||||
}
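With the mock build, verification always succeeds unless the proof carries the `InvalidTestProof` marker in its inner VM proof bytes. A brief usage sketch under that assumption (the test context, `cfg`, and `fixtureJSON` are illustrative, not part of the repository):

// Illustrative sketch: exercising the mock verifier in a test.
v, err := NewVerifier(cfg) // cfg: a *config.VerifierConfig built by the test setup
if err != nil {
	t.Fatal(err)
}
var proof message.OpenVMChunkProof
if err := json.Unmarshal(fixtureJSON, &proof); err != nil { // fixtureJSON: any serialized chunk proof
	t.Fatal(err)
}
ok, _ := v.VerifyChunkProof(&proof, "euclidV2")
// ok is true unless proof.VmProof.Proof equals InvalidTestProof; the mock never returns an error.
_ = ok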
|
||||
|
||||
@@ -7,10 +7,8 @@ import (
|
||||
// InvalidTestProof invalid proof used in tests
|
||||
const InvalidTestProof = "this is a invalid proof"
|
||||
|
||||
// Verifier represents a rust ffi to a halo2 verifier.
|
||||
// Verifier represents a rust ffi to a verifier.
|
||||
type Verifier struct {
|
||||
cfg *config.VerifierConfig
|
||||
ChunkVKMap map[string]string
|
||||
BatchVKMap map[string]string
|
||||
BundleVkMap map[string]string
|
||||
OpenVMVkMap map[string]struct{}
|
||||
}
|
||||
|
||||
@@ -11,11 +11,9 @@ package verifier
|
||||
import "C" //nolint:typecheck
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"unsafe"
|
||||
@@ -27,65 +25,69 @@ import (
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
)
|
||||
|
||||
// This struct maps to `CircuitConfig` in common/libzkp/impl/src/verifier.rs
|
||||
// Defining a brand new struct here eliminates side effects in case fields
// in `*config.CircuitConfig` are changed
|
||||
type rustCircuitConfig struct {
|
||||
ForkName string `json:"fork_name"`
|
||||
AssetsPath string `json:"assets_path"`
|
||||
}
|
||||
|
||||
func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
|
||||
return &rustCircuitConfig{
|
||||
ForkName: cfg.ForkName,
|
||||
AssetsPath: cfg.AssetsPath,
|
||||
}
|
||||
}
|
||||
|
||||
// This struct maps to `VerifierConfig` in common/libzkp/impl/src/verifier.rs
|
||||
// Defining a brand new struct here eliminates side effects in case fields
// in `*config.VerifierConfig` are changed
|
||||
type rustVerifierConfig struct {
|
||||
HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
|
||||
}
|
||||
|
||||
func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
|
||||
return &rustVerifierConfig{
|
||||
HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
|
||||
}
|
||||
}
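These wrapper structs only pin down the JSON shape handed to the Rust library; marshaling a populated `rustVerifierConfig` yields the string later passed to `C.init`. A sketch of that JSON for assumed values (the fork name and assets path below are examples):

// Illustrative only: the JSON produced for C.init with example values.
cfg := &rustVerifierConfig{
	HighVersionCircuit: &rustCircuitConfig{
		ForkName:   "euclidV2",
		AssetsPath: "/assets/test_assets",
	},
}
b, _ := json.Marshal(cfg)
// string(b) == `{"high_version_circuit":{"fork_name":"euclidV2","assets_path":"/assets/test_assets"}}`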
|
||||
|
||||
type rustVkDump struct {
|
||||
Chunk string `json:"chunk_vk"`
|
||||
Batch string `json:"batch_vk"`
|
||||
Bundle string `json:"bundle_vk"`
|
||||
}
|
||||
|
||||
// NewVerifier Sets up a rust ffi to call verify.
|
||||
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
|
||||
if cfg.MockMode {
|
||||
chunkVKMap := map[string]string{cfg.ForkName: "mock_vk"}
|
||||
batchVKMap := map[string]string{cfg.ForkName: "mock_vk"}
|
||||
bundleVKMap := map[string]string{cfg.ForkName: "mock_vk"}
|
||||
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap, BundleVkMap: bundleVKMap}, nil
|
||||
verifierConfig := newRustVerifierConfig(cfg)
|
||||
configBytes, err := json.Marshal(verifierConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
paramsPathStr := C.CString(cfg.ParamsPath)
|
||||
assetsPathLoStr := C.CString(cfg.AssetsPathLo)
|
||||
assetsPathHiStr := C.CString(cfg.AssetsPathHi)
|
||||
|
||||
configStr := C.CString(string(configBytes))
|
||||
defer func() {
|
||||
C.free(unsafe.Pointer(paramsPathStr))
|
||||
C.free(unsafe.Pointer(assetsPathLoStr))
|
||||
C.free(unsafe.Pointer(assetsPathHiStr))
|
||||
C.free(unsafe.Pointer(configStr))
|
||||
}()
|
||||
|
||||
C.init_batch_verifier(paramsPathStr, assetsPathHiStr)
|
||||
C.init_chunk_verifier(paramsPathStr, assetsPathLoStr, assetsPathHiStr)
|
||||
C.init(configStr)
|
||||
|
||||
v := &Verifier{
|
||||
cfg: cfg,
|
||||
ChunkVKMap: make(map[string]string),
|
||||
BatchVKMap: make(map[string]string),
|
||||
BundleVkMap: make(map[string]string),
|
||||
OpenVMVkMap: make(map[string]struct{}),
|
||||
}
|
||||
|
||||
bundleVK, err := v.readVK(path.Join(cfg.AssetsPathHi, "vk_bundle.vkey"))
|
||||
if err != nil {
|
||||
if err := v.loadOpenVMVks(message.EuclidV2Fork); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
batchVK, err := v.readVK(path.Join(cfg.AssetsPathHi, "vk_batch.vkey"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chunkVK, err := v.readVK(path.Join(cfg.AssetsPathHi, "vk_chunk.vkey"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v.BundleVkMap[cfg.ForkName] = bundleVK
|
||||
v.BatchVKMap[cfg.ForkName] = batchVK
|
||||
v.ChunkVKMap[cfg.ForkName] = chunkVK
|
||||
|
||||
if err := v.loadEmbedVK(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
|
||||
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
|
||||
if v.cfg.MockMode {
|
||||
log.Info("Mock mode, batch verifier disabled")
|
||||
if string(proof.Proof) == InvalidTestProof {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
||||
}
|
||||
// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Verifier.
|
||||
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
|
||||
buf, err := json.Marshal(proof)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -103,16 +105,8 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string)
|
||||
return verified != 0, nil
|
||||
}
|
||||
|
||||
// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
|
||||
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string) (bool, error) {
|
||||
if v.cfg.MockMode {
|
||||
log.Info("Mock mode, verifier disabled")
|
||||
if string(proof.Proof) == InvalidTestProof {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
||||
}
|
||||
// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Verifier.
|
||||
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
|
||||
buf, err := json.Marshal(proof)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -131,27 +125,21 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string)
|
||||
}
|
||||
|
||||
// VerifyBundleProof Verify a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier.
|
||||
func (v *Verifier) VerifyBundleProof(proof *message.BundleProof) (bool, error) {
|
||||
if v.cfg.MockMode {
|
||||
log.Info("Mock mode, verifier disabled")
|
||||
if string(proof.Proof) == InvalidTestProof {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
||||
}
|
||||
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
|
||||
buf, err := json.Marshal(proof)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
proofStr := C.CString(string(buf))
|
||||
forkNameStr := C.CString(forkName)
|
||||
defer func() {
|
||||
C.free(unsafe.Pointer(proofStr))
|
||||
C.free(unsafe.Pointer(forkNameStr))
|
||||
}()
|
||||
|
||||
log.Info("Start to verify bundle proof ...")
|
||||
verified := C.verify_bundle_proof(proofStr)
|
||||
verified := C.verify_bundle_proof(proofStr, forkNameStr)
|
||||
return verified != 0, nil
|
||||
}
|
||||
|
||||
@@ -167,23 +155,36 @@ func (v *Verifier) readVK(filePat string) (string, error) {
|
||||
return base64.StdEncoding.EncodeToString(byt), nil
|
||||
}
|
||||
|
||||
//go:embed legacy_vk/*
|
||||
var legacyVKFS embed.FS
|
||||
func (v *Verifier) loadOpenVMVks(forkName string) error {
|
||||
tempFile := path.Join(os.TempDir(), "openVmVk.json")
|
||||
defer func() {
|
||||
if err := os.Remove(tempFile); err != nil {
|
||||
log.Error("failed to remove temp file", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
func (v *Verifier) loadEmbedVK() error {
|
||||
batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
|
||||
forkNameCStr := C.CString(forkName)
|
||||
defer C.free(unsafe.Pointer(forkNameCStr))
|
||||
tempFileCStr := C.CString(tempFile)
|
||||
defer C.free(unsafe.Pointer(tempFileCStr))
|
||||
|
||||
C.dump_vk(forkNameCStr, tempFileCStr)
|
||||
|
||||
f, err := os.Open(tempFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
byt, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
log.Error("load embed batch vk failure", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
|
||||
if err != nil {
|
||||
log.Error("load embed chunk vk failure", "err", err)
|
||||
var dump rustVkDump
|
||||
if err := json.Unmarshal(byt, &dump); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v.BatchVKMap["curie"] = base64.StdEncoding.EncodeToString(batchVKBytes)
|
||||
v.ChunkVKMap["curie"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
|
||||
v.OpenVMVkMap[dump.Chunk] = struct{}{}
|
||||
v.OpenVMVkMap[dump.Batch] = struct{}{}
|
||||
v.OpenVMVkMap[dump.Bundle] = struct{}{}
|
||||
return nil
|
||||
}
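In this change the verifying keys come from the Rust library rather than the embedded legacy files: `loadOpenVMVks` asks `C.dump_vk` to write them to a temporary JSON file and reads that file back into `rustVkDump`. A sketch of the file shape this code expects (the values are placeholders, not real keys):

// Illustrative only: the shape of the temporary file written by C.dump_vk and parsed above.
// The values are placeholders, not real verifying keys.
raw := []byte(`{"chunk_vk":"PLACEHOLDER_CHUNK_VK","batch_vk":"PLACEHOLDER_BATCH_VK","bundle_vk":"PLACEHOLDER_BUNDLE_VK"}`)
var dump rustVkDump
if err := json.Unmarshal(raw, &dump); err != nil {
	panic(err) // sketch only; the real code returns the error
}
// dump.Chunk, dump.Batch and dump.Bundle are then recorded as keys of v.OpenVMVkMap.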
|
||||
|
||||
@@ -11,15 +11,15 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
)
|
||||
|
||||
var (
|
||||
paramsPath = flag.String("params", "/assets/test_params", "params dir")
|
||||
assetsPathLo = flag.String("assets", "/assets/test_assets_lo", "assets dir")
|
||||
assetsPathHi = flag.String("assets", "/assets/test_assets_hi", "assets dir")
|
||||
assetsPathLo = flag.String("assets_lo", "/assets/test_assets_lo", "assets dir")
|
||||
assetsPathHi = flag.String("assets", "/assets/test_assets", "assets dir")
|
||||
batchProofPath = flag.String("batch_proof", "/assets/proof_data/batch_proof", "batch proof file path")
|
||||
chunkProofPath1 = flag.String("chunk_proof1", "/assets/proof_data/chunk_proof1", "chunk proof file path 1")
|
||||
chunkProofPath2 = flag.String("chunk_proof2", "/assets/proof_data/chunk_proof2", "chunk proof file path 2")
|
||||
@@ -29,53 +29,54 @@ func TestFFI(t *testing.T) {
|
||||
as := assert.New(t)
|
||||
|
||||
cfg := &config.VerifierConfig{
|
||||
MockMode: false,
|
||||
ParamsPath: *paramsPath,
|
||||
AssetsPathLo: *assetsPathLo,
|
||||
AssetsPathHi: *assetsPathHi,
|
||||
HighVersionCircuit: &config.CircuitConfig{
|
||||
AssetsPath: *assetsPathHi,
|
||||
ForkName: "euclidV2",
|
||||
MinProverVersion: "",
|
||||
},
|
||||
}
|
||||
|
||||
v, err := NewVerifier(cfg)
|
||||
as.NoError(err)
|
||||
|
||||
chunkProof1 := readChunkProof(*chunkProofPath1, as)
|
||||
chunkOk1, err := v.VerifyChunkProof(chunkProof1)
|
||||
chunkOk1, err := v.VerifyChunkProof(chunkProof1, "euclidV2")
|
||||
as.NoError(err)
|
||||
as.True(chunkOk1)
|
||||
t.Log("Verified chunk proof 1")
|
||||
|
||||
chunkProof2 := readChunkProof(*chunkProofPath2, as)
|
||||
chunkOk2, err := v.VerifyChunkProof(chunkProof2)
|
||||
chunkOk2, err := v.VerifyChunkProof(chunkProof2, "euclidV2")
|
||||
as.NoError(err)
|
||||
as.True(chunkOk2)
|
||||
t.Log("Verified chunk proof 2")
|
||||
|
||||
batchProof := readBatchProof(*batchProofPath, as)
|
||||
batchOk, err := v.VerifyBatchProof(batchProof, "curie")
|
||||
batchOk, err := v.VerifyBatchProof(batchProof, "euclidV2")
|
||||
as.NoError(err)
|
||||
as.True(batchOk)
|
||||
t.Log("Verified batch proof")
|
||||
}
|
||||
|
||||
func readBatchProof(filePat string, as *assert.Assertions) *message.BatchProof {
|
||||
func readBatchProof(filePat string, as *assert.Assertions) *types.OpenVMBatchProof {
|
||||
f, err := os.Open(filePat)
|
||||
as.NoError(err)
|
||||
byt, err := io.ReadAll(f)
|
||||
as.NoError(err)
|
||||
|
||||
proof := &message.BatchProof{}
|
||||
proof := &types.OpenVMBatchProof{}
|
||||
as.NoError(json.Unmarshal(byt, proof))
|
||||
|
||||
return proof
|
||||
}
|
||||
|
||||
func readChunkProof(filePat string, as *assert.Assertions) *message.ChunkProof {
|
||||
func readChunkProof(filePat string, as *assert.Assertions) *types.OpenVMChunkProof {
|
||||
f, err := os.Open(filePat)
|
||||
as.NoError(err)
|
||||
byt, err := io.ReadAll(f)
|
||||
as.NoError(err)
|
||||
|
||||
proof := &message.ChunkProof{}
|
||||
proof := &types.OpenVMChunkProof{}
|
||||
as.NoError(json.Unmarshal(byt, proof))
|
||||
|
||||
return proof
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/da-codec/encoding/codecv0"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -20,17 +19,23 @@ type Batch struct {
|
||||
db *gorm.DB `gorm:"column:-"`
|
||||
|
||||
// batch
|
||||
Index uint64 `json:"index" gorm:"column:index"`
|
||||
Hash string `json:"hash" gorm:"column:hash"`
|
||||
DataHash string `json:"data_hash" gorm:"column:data_hash"`
|
||||
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
|
||||
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
|
||||
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
|
||||
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
|
||||
StateRoot string `json:"state_root" gorm:"column:state_root"`
|
||||
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
|
||||
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
|
||||
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
|
||||
Index uint64 `json:"index" gorm:"column:index"`
|
||||
Hash string `json:"hash" gorm:"column:hash"`
|
||||
DataHash string `json:"data_hash" gorm:"column:data_hash"`
|
||||
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
|
||||
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
|
||||
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
|
||||
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
|
||||
StateRoot string `json:"state_root" gorm:"column:state_root"`
|
||||
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
|
||||
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
|
||||
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
|
||||
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
|
||||
PrevL1MessageQueueHash string `json:"prev_l1_message_queue_hash" gorm:"column:prev_l1_message_queue_hash"`
|
||||
PostL1MessageQueueHash string `json:"post_l1_message_queue_hash" gorm:"column:post_l1_message_queue_hash"`
|
||||
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
|
||||
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
|
||||
ChallengeDigest string `json:"challenge_digest" gorm:"column:challenge_digest"`
|
||||
|
||||
// proof
|
||||
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
|
||||
@@ -81,7 +86,7 @@ func (*Batch) TableName() string {
|
||||
func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
|
||||
var batch Batch
|
||||
db := o.db.WithContext(ctx)
|
||||
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
|
||||
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND codec_version != 5 AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
|
||||
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady))
|
||||
err := db.Raw(sql).Scan(&batch).Error
|
||||
if err != nil {
|
||||
@@ -93,6 +98,23 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTo
|
||||
return &batch, nil
|
||||
}
|
||||
|
||||
// GetUnassignedBatchCount retrieves unassigned batch count.
|
||||
func (o *Batch) GetUnassignedBatchCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (int64, error) {
|
||||
var count int64
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Batch{})
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
|
||||
db = db.Where("codec_version != 5")
|
||||
db = db.Where("batch.deleted_at IS NULL")
|
||||
if err := db.Count(&count).Error; err != nil {
|
||||
return 0, fmt.Errorf("Batch.GetUnassignedBatchCount error: %w", err)
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// GetAssignedBatch retrieves assigned batch based on the specified limit.
|
||||
// The returned batches are sorted in ascending order by their index.
|
||||
func (o *Batch) GetAssignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
|
||||
@@ -225,6 +247,19 @@ func (o *Batch) GetBatchesByBundleHash(ctx context.Context, bundleHash string) (
|
||||
return batches, nil
|
||||
}
|
||||
|
||||
// GetBatchByIndex retrieves the batch by the given index.
|
||||
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Batch{})
|
||||
db = db.Where("index = ?", index)
|
||||
|
||||
var batch Batch
|
||||
if err := db.First(&batch).Error; err != nil {
|
||||
return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
|
||||
}
|
||||
return &batch, nil
|
||||
}
|
||||
|
||||
// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) {
if batch == nil {
@@ -236,11 +271,16 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
return nil, errors.New("invalid args: batch contains 0 chunk")
}

daBatch, err := codecv0.NewDABatch(batch)
codec, err := encoding.CodecFromVersion(encoding.CodecV0)
if err != nil {
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}

daBatch, err := codec.NewDABatch(batch)
if err != nil {
log.Error("failed to create new DA batch",
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
return nil, err
}

@@ -248,7 +288,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
parentBatch, err := o.GetLatestBatch(ctx)
if err != nil {
log.Error("failed to get latest batch", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}

@@ -259,17 +299,17 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
startChunkIndex = parentBatch.EndChunkIndex + 1
}

startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
startDAChunk, err := codec.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to create start DA chunk", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}

startDAChunkHash, err := startDAChunk.Hash()
if err != nil {
log.Error("failed to get start DA chunk hash", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}

@@ -277,24 +317,24 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
for i := uint64(0); i < numChunks-1; i++ {
totalL1MessagePoppedBeforeEndDAChunk += batch.Chunks[i].NumL1Messages(totalL1MessagePoppedBeforeEndDAChunk)
}
endDAChunk, err := codecv0.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
endDAChunk, err := codec.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
log.Error("failed to create end DA chunk", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}

endDAChunkHash, err := endDAChunk.Hash()
if err != nil {
log.Error("failed to get end DA chunk hash", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}

newBatch := Batch{
Index: batch.Index,
Hash: daBatch.Hash().Hex(),
DataHash: daBatch.DataHash.Hex(),
DataHash: daBatch.DataHash().Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: endDAChunkHash.Hex(),

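Note: the hunks above swap direct calls into the codecv0 package for a codec resolved at runtime with encoding.CodecFromVersion(encoding.CodecV0), so the same call sites can later serve newer codec versions. A minimal sketch of that call sequence, assuming the pinned da-codec release exposes exactly the methods used in the diff (CodecFromVersion, NewDABatch, Hash, DataHash):

package example

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding"
)

// batchHashes mirrors the InsertBatch call sequence above: resolve the codec,
// build the DA batch, then derive its hash and data hash.
func batchHashes(batch *encoding.Batch) (hash, dataHash string, err error) {
	codec, err := encoding.CodecFromVersion(encoding.CodecV0)
	if err != nil {
		return "", "", fmt.Errorf("resolve codec: %w", err)
	}
	daBatch, err := codec.NewDABatch(batch)
	if err != nil {
		return "", "", fmt.Errorf("build DA batch: %w", err)
	}
	return daBatch.Hash().Hex(), daBatch.DataHash().Hex(), nil
}
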
@@ -59,7 +59,7 @@ func (*Bundle) TableName() string {
func (o *Bundle) GetUnassignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {
var bundle Bundle
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;",
sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND codec_version != 5 AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.BatchProofsStatusReady))
err := db.Raw(sql).Scan(&bundle).Error
if err != nil {
@@ -71,6 +71,23 @@ func (o *Bundle) GetUnassignedBundle(ctx context.Context, maxActiveAttempts, max
return &bundle, nil
}

// GetUnassignedBundleCount retrieves unassigned bundle count.
func (o *Bundle) GetUnassignedBundleCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (int64, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("batch_proofs_status = ?", int(types.BatchProofsStatusReady))
db = db.Where("codec_version != 5")
db = db.Where("bundle.deleted_at IS NULL")
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Bundle.GetUnassignedBundleCount error: %w", err)
}
return count, nil
}
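Note: GetUnassignedBundleCount applies the same filters as GetUnassignedBundle, but as a chained GORM count query instead of raw SQL, and it likewise skips bundles with codec_version 5. A hypothetical caller; everything except the ORM method itself is illustrative:

package example

import (
	"context"
	"log"
)

// bundleCounter captures just the method added above; the concrete type is the Bundle ORM struct.
type bundleCounter interface {
	GetUnassignedBundleCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (int64, error)
}

// logUnassignedBundles reports how many bundles still wait for a prover.
func logUnassignedBundles(ctx context.Context, bundles bundleCounter) {
	const maxActiveAttempts, maxTotalAttempts = 10, 20 // illustrative limits
	count, err := bundles.GetUnassignedBundleCount(ctx, maxActiveAttempts, maxTotalAttempts)
	if err != nil {
		log.Printf("failed to count unassigned bundles: %v", err)
		return
	}
	log.Printf("unassigned bundles pending proof: %d", count)
}
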

// GetAssignedBundle retrieves assigned bundle based on the specified limit.
// The returned bundle sorts in ascending order by their index.
func (o *Bundle) GetAssignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {

@@ -2,18 +2,15 @@ package orm

import (
"context"
"encoding/json"
"errors"
"fmt"
"time"

"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/da-codec/encoding/codecv0"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)

@@ -31,6 +28,8 @@ type Chunk struct {
StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"`
TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
TotalL1MessagesPoppedInChunk uint64 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
PrevL1MessageQueueHash string `json:"prev_l1_message_queue_hash" gorm:"column:prev_l1_message_queue_hash"`
PostL1MessageQueueHash string `json:"post_l1_message_queue_hash" gorm:"column:post_l1_message_queue_hash"`
ParentChunkHash string `json:"parent_chunk_hash" gorm:"column:parent_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
ParentChunkStateRoot string `json:"parent_chunk_state_root" gorm:"column:parent_chunk_state_root"`
@@ -74,11 +73,11 @@ func (*Chunk) TableName() string {

// GetUnassignedChunk retrieves unassigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (*Chunk, error) {
var chunk Chunk
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND end_block_number <= %d AND codec_version != 5 AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, height)
err := db.Raw(sql).Scan(&chunk).Error
if err != nil {
return nil, fmt.Errorf("Chunk.GetUnassignedChunk error: %w", err)
@@ -89,13 +88,30 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTo
return &chunk, nil
}

// GetUnassignedChunkCount retrieves unassigned chunk count.
func (o *Chunk) GetUnassignedChunkCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (int64, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("end_block_number <= ?", height)
db = db.Where("codec_version != 5")
db = db.Where("chunk.deleted_at IS NULL")
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Chunk.GetUnassignedChunkCount error: %w", err)
}
return count, nil
}
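Note: GetUnassignedChunk and GetUnassignedChunkCount now take a height argument, so only chunks whose end_block_number is at or below the block height a prover has synced to are handed out or counted; chunks with codec_version 5 are excluded as well. A sketch of how the count might feed a backlog report, with all names outside the ORM method assumed:

package example

import (
	"context"
	"log"
)

// chunkCounter captures the new signature from the diff; the concrete type is the Chunk ORM struct.
type chunkCounter interface {
	GetUnassignedChunkCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (int64, error)
}

// reportChunkBacklog reports how many chunks a prover synced to proverHeight could still pick up.
func reportChunkBacklog(ctx context.Context, chunks chunkCounter, proverHeight uint64) {
	const maxActiveAttempts, maxTotalAttempts = 10, 20 // illustrative limits
	count, err := chunks.GetUnassignedChunkCount(ctx, maxActiveAttempts, maxTotalAttempts, proverHeight)
	if err != nil {
		log.Printf("failed to count unassigned chunks at height %d: %v", proverHeight, err)
		return
	}
	log.Printf("unassigned chunks provable at height %d: %d", proverHeight, count)
}
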

// GetAssignedChunk retrieves assigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetAssignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
func (o *Chunk) GetAssignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (*Chunk, error) {
var chunk Chunk
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND end_block_number <= %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, height)
err := db.Raw(sql).Scan(&chunk).Error
if err != nil {
return nil, fmt.Errorf("Chunk.GetAssignedChunk error: %w", err)
@@ -121,32 +137,6 @@ func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*
return chunks, nil
}

// GetProofsByBatchHash retrieves the proofs associated with a specific batch hash.
// It returns a slice of decoded proofs (message.ChunkProof) obtained from the database.
// The returned proofs are sorted in ascending order by their associated chunk index.
func (o *Chunk) GetProofsByBatchHash(ctx context.Context, batchHash string) ([]*message.ChunkProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash", batchHash)
db = db.Order("index ASC")

var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash error: %w, batch hash: %v", err, batchHash)
}

var proofs []*message.ChunkProof
for _, chunk := range chunks {
var proof message.ChunkProof
if err := json.Unmarshal(chunk.Proof, &proof); err != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", err, batchHash, chunk.Hash)
}
proofs = append(proofs, &proof)
}

return proofs, nil
}

// getLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) getLatestChunk(ctx context.Context) (*Chunk, error) {
db := o.db.WithContext(ctx)
@@ -258,7 +248,12 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
parentChunkStateRoot = parentChunk.StateRoot
}

daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
codec, err := encoding.CodecFromVersion(encoding.CodecV0)
if err != nil {
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}

daChunk, err := codec.NewDAChunk(chunk, totalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to initialize new DA chunk", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
@@ -270,13 +265,13 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}

totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
totalL1CommitCalldataSize, err := codec.EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit calldata size", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}

totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(chunk)
totalL1CommitGas, err := codec.EstimateChunkL1CommitGas(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit gas", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
@@ -290,7 +285,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: chunk.L2GasUsed(),
TotalL2TxGas: chunk.TotalGasUsed(),
TotalL2TxNum: chunk.NumL2Transactions(),
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
TotalL1CommitGas: totalL1CommitGas,

@@ -87,6 +87,55 @@ func (o *L2Block) GetL2BlockByNumber(ctx context.Context, blockNumber uint64) (*
return &l2Block, nil
}

// GetL2BlocksInRange retrieves the L2 blocks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end block numbers.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*encoding.Block, error) {
if startBlockNumber > endBlockNumber {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: start block number should be less than or equal to end block number, start block: %v, end block: %v", startBlockNumber, endBlockNumber)
}

db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_root, row_consumption")
db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
db = db.Order("number ASC")

var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}

// sanity check
if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: unexpected number of results, expected: %v, got: %v", endBlockNumber-startBlockNumber+1, len(l2Blocks))
}

var blocks []*encoding.Block
for _, v := range l2Blocks {
var block encoding.Block

if err := json.Unmarshal([]byte(v.Transactions), &block.Transactions); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}

block.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), block.Header); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}

block.WithdrawRoot = common.HexToHash(v.WithdrawRoot)

if err := json.Unmarshal([]byte(v.RowConsumption), &block.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}

blocks = append(blocks, &block)
}

return blocks, nil
}

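Note: GetL2BlocksInRange rebuilds encoding.Block values from the stored JSON columns (header, transactions, row_consumption) and fails fast when the query returns fewer rows than the closed range [start, end] should contain. A hypothetical caller relying on those built-in checks; only the ORM method comes from the diff:

package example

import (
	"context"
	"fmt"

	"github.com/scroll-tech/da-codec/encoding"
)

// blockRangeStore captures the query added above; the concrete type is the L2Block ORM struct.
type blockRangeStore interface {
	GetL2BlocksInRange(ctx context.Context, startBlockNumber, endBlockNumber uint64) ([]*encoding.Block, error)
}

// blocksForChunk loads the block range a chunk covers; the sanity checks in
// GetL2BlocksInRange guarantee the result has exactly end-start+1 blocks.
func blocksForChunk(ctx context.Context, store blockRangeStore, start, end uint64) ([]*encoding.Block, error) {
	blocks, err := store.GetL2BlocksInRange(ctx, start, end)
	if err != nil {
		return nil, fmt.Errorf("load blocks [%d, %d]: %w", start, end, err)
	}
	return blocks, nil
}
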
// InsertL2Blocks inserts l2 blocks into the "l2_block" table.
// for unit test
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block) error {

@@ -9,11 +9,12 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"

"scroll-tech/database/migrate"

"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
)

var (

@@ -2,6 +2,7 @@ package orm

import (
"context"
"errors"
"fmt"
"time"

@@ -65,7 +66,7 @@ func (p *ProverBlockList) IsPublicKeyBlocked(ctx context.Context, publicKey stri
db = db.Model(&ProverBlockList{})
db = db.Where("public_key = ?", publicKey)
if err := db.First(&ProverBlockList{}).Error; err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return false, nil // Public key not found, hence it's not blocked.
}
return true, fmt.Errorf("ProverBlockList.IsPublicKeyBlocked error: %w, public key: %v", err, publicKey)

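Note: this hunk (and the prover_task.go one below) replaces the direct comparison err == gorm.ErrRecordNotFound with errors.Is, which still matches when the sentinel error is wrapped. A minimal illustration of the difference, not taken from the repository:

package example

import (
	"errors"
	"fmt"

	"gorm.io/gorm"
)

// classify shows why errors.Is is the safer check once errors may be wrapped.
func classify(err error) string {
	wrapped := fmt.Errorf("query failed: %w", err)
	if wrapped == gorm.ErrRecordNotFound {
		return "direct match" // never reached for the wrapped error
	}
	if errors.Is(wrapped, gorm.ErrRecordNotFound) {
		return "not found (wrapped)" // errors.Is unwraps and still matches
	}
	return "other error"
}

Calling classify(gorm.ErrRecordNotFound) returns "not found (wrapped)", whereas the old equality check would have missed the wrapped error entirely.
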
@@ -2,6 +2,7 @@ package orm

import (
"context"
"errors"
"fmt"
"time"

@@ -60,7 +61,7 @@ func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (bo
var task ProverTask
err := db.Where("prover_public_key = ? AND proving_status = ?", publicKey, types.ProverAssigned).First(&task).Error
if err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return false, nil
}
return false, err
@@ -116,23 +117,25 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType messag
return proverTasks, nil
}

// GetAssignedProverTaskByTaskIDAndProver get prover task taskID and public key
// TODO: when prover all upgrade need DEPRECATED this function
func (o *ProverTask) GetAssignedProverTaskByTaskIDAndProver(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
// GetFailedProverTasksByHash retrieves the failed ProverTask records associated with the specified hash.
// The returned prover task objects are sorted in descending order by their ids.
func (o *ProverTask) GetFailedProverTasksByHash(ctx context.Context, taskType message.ProofType, hash string, limit int) ([]*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", taskID)
db = db.Where("prover_public_key", proverPublicKey)
db = db.Where("prover_version", proverVersion)
db = db.Where("proving_status", types.ProverAssigned)
db = db.Where("task_id", hash)
db = db.Where("proving_status = ?", int(types.ProverProofInvalid))
db = db.Order("id desc")

var proverTask ProverTask
err := db.First(&proverTask).Error
if err != nil {
return nil, fmt.Errorf("ProverTask.GetProverTaskByTaskIDAndProver err:%w, taskID:%s, pubkey:%s, prover_version:%s", err, taskID, proverPublicKey, proverVersion)
if limit != 0 {
db = db.Limit(limit)
}
return &proverTask, nil

var proverTasks []*ProverTask
if err := db.Find(&proverTasks).Error; err != nil {
return nil, fmt.Errorf("ProverTask.GetFailedProverTasksByHash error: %w, hash: %v", err, hash)
}
return proverTasks, nil
}
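Note: the deprecated GetAssignedProverTaskByTaskIDAndProver lookup is replaced by GetFailedProverTasksByHash, which returns the ProverProofInvalid attempts for a task hash, newest id first, and treats limit == 0 as no limit. A hypothetical caller; the stand-in ProverTask and ProofType types below only echo names used in the diff and are not the real ORM definitions:

package example

import (
	"context"
	"fmt"
)

// Stand-ins for the ORM struct and message.ProofType used by the real method.
type ProverTask struct {
	ProverPublicKey string
	ProverVersion   string
}

type ProofType int

type failedTaskLister interface {
	GetFailedProverTasksByHash(ctx context.Context, taskType ProofType, hash string, limit int) ([]*ProverTask, error)
}

// listRecentFailures prints up to five of the most recent failed attempts for a task hash.
func listRecentFailures(ctx context.Context, tasks failedTaskLister, taskType ProofType, hash string) error {
	failures, err := tasks.GetFailedProverTasksByHash(ctx, taskType, hash, 5)
	if err != nil {
		return err
	}
	for _, t := range failures {
		fmt.Printf("failed attempt by %s (version %s)\n", t.ProverPublicKey, t.ProverVersion)
	}
	return nil
}
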

// GetProverTaskByUUIDAndPublicKey get prover task taskID by uuid and public key

Some files were not shown because too many files have changed in this diff.