Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 15:38:18 -05:00)

Compare commits: 49 commits by omerfirmak (branch: bump-versi…)
| Author | SHA1 | Date |
|---|---|---|
|  | b55c858980 |  |
|  | 5d6b5a89f4 |  |
|  | 4ee459a602 |  |
|  | 276385fd0a |  |
|  | 82fb15de3b |  |
|  | 5204ad50e0 |  |
|  | f824fb0efc |  |
|  | a55c7bdc77 |  |
|  | 47b1a037a9 |  |
|  | ae34020c34 |  |
|  | fa9fab6e98 |  |
|  | c4f869a33a |  |
|  | 0cee9a51e6 |  |
|  | 97de988228 |  |
|  | a12175dafc |  |
|  | fedfa04c2b |  |
|  | 0d8b00c3de |  |
|  | 826357ab5d |  |
|  | d26381cba3 |  |
|  | 0e65686ce4 |  |
|  | 6dd878eaca |  |
|  | a18fe06440 |  |
|  | 3ac69bec51 |  |
|  | e80f030246 |  |
|  | a77f1413ee |  |
|  | 5b62692098 |  |
|  | 4f34e90f00 |  |
|  | 6b1b822c81 |  |
|  | a34c01d90b |  |
|  | 0578aab3ae |  |
|  | 55dfbf6735 |  |
|  | f3ddf43439 |  |
|  | a2582dcc3f |  |
|  | 0a1868cec1 |  |
|  | fcfbc53252 |  |
|  | 13c8605211 |  |
|  | 228cba48b7 |  |
|  | b7c5ba9046 |  |
|  | 5f7973528e |  |
|  | 69a7339bab |  |
|  | 056ebdca1c |  |
|  | 7a386ad807 |  |
|  | 5fb93c4dc3 |  |
|  | a5e2d71ebd |  |
|  | 6b837c01f3 |  |
|  | 5b6b145753 |  |
|  | c07975acdf |  |
|  | dfdb2ecf07 |  |
|  | a6f2457040 |  |
.github/workflows/common.yml (vendored, 12 changed lines)

```diff
@@ -29,7 +29,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-12-03
+          toolchain: nightly-2024-12-06
           override: true
           components: rustfmt, clippy
       - name: Install Go
@@ -42,11 +42,11 @@ jobs:
         uses: Swatinem/rust-cache@v2
         with:
           workspaces: "common/libzkp/impl -> target"
-      - name: Lint
-        working-directory: 'common'
-        run: |
-          rm -rf $HOME/.cache/golangci-lint
-          make lint
+      # - name: Lint
+      #   working-directory: 'common'
+      #   run: |
+      #     rm -rf $HOME/.cache/golangci-lint
+      #     make lint
   goimports-lint:
     if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
```
.github/workflows/docker.yml (vendored, 87 changed lines)

```diff
@@ -99,6 +99,51 @@ jobs:
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
             ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
 
+  blob_uploader:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ env.AWS_REGION }}
+      - name: Login to Amazon ECR
+        id: login-ecr
+        uses: aws-actions/amazon-ecr-login@v2
+      - name: check repo and create it if not exist
+        env:
+          REPOSITORY: blob-uploader
+        run: |
+          aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
+      - name: Build and push
+        uses: docker/build-push-action@v3
+        env:
+          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
+          REPOSITORY: blob-uploader
+          IMAGE_TAG: ${{ github.ref_name }}
+        with:
+          context: .
+          file: ./build/dockerfiles/blob_uploader.Dockerfile
+          platforms: linux/amd64,linux/arm64
+          push: true
+          tags: |
+            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
+            scrolltech/${{ env.REPOSITORY }}:latest
+            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
+            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
+
   rollup-db-cli:
     runs-on: ubuntu-latest
     steps:
@@ -307,6 +352,48 @@ jobs:
           REPOSITORY: coordinator-api
         run: |
           aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
+      - name: Setup SSH for repositories and clone them
+        run: |
+          mkdir -p ~/.ssh
+          chmod 700 ~/.ssh
+
+          # Setup for plonky3-gpu
+          echo "${{ secrets.PLONKY3_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/plonky3_gpu_key
+          chmod 600 ~/.ssh/plonky3_gpu_key
+          eval "$(ssh-agent -s)" > /dev/null
+          ssh-add ~/.ssh/plonky3_gpu_key 2>/dev/null
+          ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts 2>/dev/null
+          echo "Loaded plonky3-gpu key"
+
+          # Clone plonky3-gpu repository
+          ./build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh
+
+          # Setup for openvm-stark-gpu
+          echo "${{ secrets.OPENVM_STARK_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_stark_gpu_key
+          chmod 600 ~/.ssh/openvm_stark_gpu_key
+          eval "$(ssh-agent -s)" > /dev/null
+          ssh-add ~/.ssh/openvm_stark_gpu_key 2>/dev/null
+          echo "Loaded openvm-stark-gpu key"
+
+          # Clone openvm-stark-gpu repository
+          ./build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh
+
+          # Setup for openvm-gpu
+          echo "${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}" > ~/.ssh/openvm_gpu_key
+          chmod 600 ~/.ssh/openvm_gpu_key
+          eval "$(ssh-agent -s)" > /dev/null
+          ssh-add ~/.ssh/openvm_gpu_key 2>/dev/null
+          echo "Loaded openvm-gpu key"
+
+          # Clone openvm-gpu repository
+          ./build/dockerfiles/coordinator-api/clone_openvm_gpu.sh
+
+          # Show number of loaded keys
+          echo "Number of loaded keys: $(ssh-add -l | wc -l)"
+
+      - name: Checkout specific commits
+        run: |
+          ./build/dockerfiles/coordinator-api/checkout_all.sh
       - name: Build and push
         uses: docker/build-push-action@v3
        env:
```
.github/workflows/intermediate-docker.yml (vendored, 6 changed lines)

```diff
@@ -9,9 +9,13 @@ on:
         type: choice
         options:
           - "1.20"
+          - "1.20.14"
           - "1.21"
+          - "1.21.13"
           - "1.22"
+          - "1.22.12"
           - "1.23"
+          - "1.23.7"
         default: "1.21"
       RUST_VERSION:
         description: "Rust toolchain version"
@@ -20,6 +24,7 @@ on:
         options:
           - nightly-2023-12-03
           - nightly-2022-12-10
+          - 1.86.0
         default: "nightly-2023-12-03"
       PYTHON_VERSION:
         description: "Python version"
@@ -43,6 +48,7 @@ on:
         type: choice
         options:
           - 0.1.41
+          - 0.1.71
       BASE_IMAGE:
         description: "which intermediate image you want to update"
         required: true
```
.github/workflows/prover.yml (vendored, 99 changed lines; file deleted)

```diff
@@ -1,99 +0,0 @@
-name: Prover
-
-on:
-  push:
-    branches:
-      - main
-      - staging
-      - develop
-      - alpha
-    paths:
-      - 'prover/**'
-      - '.github/workflows/prover.yml'
-  pull_request:
-    types:
-      - opened
-      - reopened
-      - synchronize
-      - ready_for_review
-    paths:
-      - 'prover/**'
-      - '.github/workflows/prover.yml'
-
-defaults:
-  run:
-    working-directory: 'prover'
-
-jobs:
-  skip_check:
-    runs-on: ubuntu-latest
-    outputs:
-      should_skip: ${{ steps.skip_check.outputs.should_skip }}
-    steps:
-      - id: skip_check
-        uses: fkirc/skip-duplicate-actions@v5
-        with:
-          cancel_others: 'true'
-          concurrent_skipping: 'same_content_newer'
-          paths_ignore: '["**/README.md"]'
-
-  fmt:
-    needs: [skip_check]
-    if: |
-      github.event.pull_request.draft == false &&
-      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
-    runs-on: ubuntu-latest
-    timeout-minutes: 5
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: nightly-2023-12-03
-          components: rustfmt
-      - name: Cargo cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          workspaces: "prover -> target"
-      - name: Cargo check
-        run: cargo check --all-features
-      - name: Cargo fmt
-        run: cargo fmt --all -- --check
-
-  clippy:
-    needs: [skip_check, fmt]
-    if: |
-      github.event.pull_request.draft == false &&
-      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
-    runs-on: ubuntu-latest
-    timeout-minutes: 30
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: nightly-2023-12-03
-          components: clippy
-      - name: Cargo cache
-        uses: Swatinem/rust-cache@v2
-        with:
-          workspaces: "prover -> target"
-      - name: Run clippy
-        run: cargo clippy --all-features --all-targets -- -D warnings
-
-  compile:
-    needs: [skip_check, clippy]
-    if: |
-      github.event.pull_request.draft == false &&
-      (github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - uses: dtolnay/rust-toolchain@master
-        with:
-          toolchain: nightly-2023-12-03
-      - name: Cache cargo
-        uses: Swatinem/rust-cache@v2
-        with:
-          workspaces: "prover -> target"
-      - name: Test
-        run: |
-          make prover
```
```diff
@@ -11,7 +11,7 @@ Please note that this project is released with a [Contributor Code of Conduct][c
 
 ## Contribute to Scroll
 
-Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
+Did you know there are many ways of contributing to Scroll? If you are looking to contribute to by adding Scroll to existing Dev Tools or by doing integrations please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. This repository covers the Scroll infrastructure and smart contracts, if you want to contribute to these areas continue reading this document.
 
 ## Issues and PRs
```
Makefile (26 changed lines)

```diff
@@ -1,6 +1,6 @@
 .PHONY: fmt dev_docker build_test_docker run_test_docker clean update
 
-L2GETH_TAG=ea3ec93d8c1e379bfd7957d4866f204d8d71237c
+L2GETH_TAG=scroll-v5.8.23
 
 help: ## Display this help message
 	@grep -h \
@@ -8,12 +8,12 @@ help: ## Display this help message
 	awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
 update: ## Update dependencies
 	go work sync
-	cd $(PWD)/bridge-history-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
-	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
-	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/bridge-history-api/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/common/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
+	cd $(PWD)/coordinator/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/database/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/rollup/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
+	cd $(PWD)/tests/integration-test/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
 
 lint: ## The code's format and security checks
 	make -C rollup lint
@@ -31,12 +31,12 @@ fmt: ## Format the code
 	cd $(PWD)/rollup/ && go mod tidy
 	cd $(PWD)/tests/integration-test/ && go mod tidy
 
-	goimports -local $(PWD)/bridge-history-api/ -w .
-	goimports -local $(PWD)/common/ -w .
-	goimports -local $(PWD)/coordinator/ -w .
-	goimports -local $(PWD)/database/ -w .
-	goimports -local $(PWD)/rollup/ -w .
-	goimports -local $(PWD)/tests/integration-test/ -w .
+	goimports -local scroll-tech/bridge-history-api/ -w .
+	goimports -local scroll-tech/common/ -w .
+	goimports -local scroll-tech/coordinator/ -w .
+	goimports -local scroll-tech/database/ -w .
+	goimports -local scroll-tech/rollup/ -w .
+	goimports -local scroll-tech/tests/integration-test/ -w .
 
 dev_docker: ## Build docker images for development/testing usages
 	docker pull postgres
```

```diff
@@ -37,6 +37,6 @@ reset-env:
 	go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli && $(PWD)/build/bin/bridgehistoryapi-db-cli reset
 
 bridgehistoryapi-docker:
-	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
-	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile
-	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile --platform=linux/amd64
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile --platform=linux/amd64
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile --platform=linux/amd64
```

File diff suppressed because one or more lines are too long
bridge-history-api/abi/backend_abi_test.go (new file, 13 lines)

```diff
@@ -0,0 +1,13 @@
+package backendabi
+
+import (
+	"testing"
+
+	"github.com/scroll-tech/go-ethereum/crypto"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestEventSignatures(t *testing.T) {
+	assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), L1RevertBatchV0EventSig)
+	assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,uint256)")), L1RevertBatchV7EventSig)
+}
```
```diff
@@ -68,7 +68,10 @@ func action(ctx *cli.Context) error {
 
 	observability.Server(ctx, db)
 
-	l1MessageFetcher := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
+	l1MessageFetcher, err := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
+	if err != nil {
+		log.Crit("failed to create L1MessageFetcher", "err", err)
+	}
 	go l1MessageFetcher.Start()
 
 	l2MessageFetcher := fetcher.NewL2MessageFetcher(subCtx, cfg.L2, db, l2Client)
```
```diff
@@ -19,9 +19,11 @@
     "ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
     "GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
     "MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
+    "MessageQueueV2Addr": "0x0000000000000000000000000000000000000000",
     "BatchBridgeGatewayAddr": "0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4",
     "GasTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
-    "WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000"
+    "WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
+    "BlobScanAPIEndpoint": "https://api.blobscan.com/blobs/"
   },
   "L2": {
     "confirmation": 0,
```
```diff
@@ -10,13 +10,16 @@ require (
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/pressly/goose/v3 v3.16.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e
+	github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950
 	github.com/stretchr/testify v1.9.0
 	github.com/urfave/cli/v2 v2.25.7
-	golang.org/x/sync v0.10.0
+	golang.org/x/sync v0.11.0
 	gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
 )
 
+replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
+
 require (
 	dario.cat/mergo v1.0.0 // indirect
 	github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
@@ -27,8 +30,8 @@ require (
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
 	github.com/chenzhuoyu/iasm v0.9.0 // indirect
-	github.com/consensys/bavard v0.1.25 // indirect
-	github.com/consensys/gnark-crypto v0.14.0 // indirect
+	github.com/consensys/bavard v0.1.13 // indirect
+	github.com/consensys/gnark-crypto v0.13.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
 	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -90,7 +93,6 @@ require (
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rs/cors v1.7.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5 // indirect
 	github.com/scroll-tech/zktrie v0.8.4 // indirect
 	github.com/sethvargo/go-retry v0.2.4 // indirect
 	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
@@ -108,9 +110,9 @@ require (
 	go.opentelemetry.io/otel/trace v1.24.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/arch v0.5.0 // indirect
-	golang.org/x/crypto v0.32.0 // indirect
+	golang.org/x/crypto v0.24.0 // indirect
 	golang.org/x/net v0.25.0 // indirect
-	golang.org/x/sys v0.29.0 // indirect
+	golang.org/x/sys v0.30.0 // indirect
 	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
```
```diff
@@ -53,10 +53,10 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
 github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
 github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
 github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
-github.com/consensys/bavard v0.1.25 h1:5YcSBnp03/HvfpKaIQLr/ecspTp2k8YNR5rQLOWvUyc=
-github.com/consensys/bavard v0.1.25/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
-github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E=
-github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0=
+github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
+github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
+github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc=
+github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
 github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
 github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
 github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
@@ -214,8 +214,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
-github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
+github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
+github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
 github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
@@ -309,10 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5 h1:vZ75srkZCStjDWq/kqZGLoucf7Y7qXC13nKjQVZ0zp8=
-github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e h1:g8jtcGiHbjWYh/V7O245IDho3WfQT4CwEpBV+MhYDrg=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
+github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54 h1:qVpsVu1J91opTn6HYeuzWcBRVhQmPR8g05i+PlOjlI4=
+github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
@@ -387,8 +387,8 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
 golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
-golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
 golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
 golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -400,8 +400,8 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -419,8 +419,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
```
```diff
@@ -30,9 +30,14 @@ type FetcherConfig struct {
 	ScrollChainAddr         string `json:"ScrollChainAddr"`
 	GatewayRouterAddr       string `json:"GatewayRouterAddr"`
 	MessageQueueAddr        string `json:"MessageQueueAddr"`
+	MessageQueueV2Addr      string `json:"MessageQueueV2Addr"`
 	BatchBridgeGatewayAddr  string `json:"BatchBridgeGatewayAddr"`
 	GasTokenGatewayAddr     string `json:"GasTokenGatewayAddr"`
 	WrappedTokenGatewayAddr string `json:"WrappedTokenGatewayAddr"`
+
+	BeaconNodeAPIEndpoint  string `json:"BeaconNodeAPIEndpoint"`
+	BlobScanAPIEndpoint    string `json:"BlobScanAPIEndpoint"`
+	BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
 }
 
 // RedisConfig redis config
```
```diff
@@ -2,6 +2,7 @@ package fetcher
 
 import (
 	"context"
+	"fmt"
 	"math/big"
 	"time"
 
@@ -10,6 +11,7 @@ import (
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
 	"gorm.io/gorm"
 
 	"scroll-tech/bridge-history-api/internal/config"
@@ -35,13 +37,32 @@ type L1MessageFetcher struct {
 }
 
 // NewL1MessageFetcher creates a new L1MessageFetcher instance.
-func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
+func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
+	blobClient := blob_client.NewBlobClients()
+	if cfg.BeaconNodeAPIEndpoint != "" {
+		beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
+		if err != nil {
+			log.Warn("failed to create BeaconNodeClient", "err", err)
+		} else {
+			blobClient.AddBlobClient(beaconNodeClient)
+		}
+	}
+	if cfg.BlobScanAPIEndpoint != "" {
+		blobClient.AddBlobClient(blob_client.NewBlobScanClient(cfg.BlobScanAPIEndpoint))
+	}
+	if cfg.BlockNativeAPIEndpoint != "" {
+		blobClient.AddBlobClient(blob_client.NewBlockNativeClient(cfg.BlockNativeAPIEndpoint))
+	}
+	if blobClient.Size() == 0 {
+		return nil, fmt.Errorf("no blob client is configured")
+	}
+
 	c := &L1MessageFetcher{
 		ctx:              ctx,
 		cfg:              cfg,
 		client:           client,
 		eventUpdateLogic: logic.NewEventUpdateLogic(db, true),
-		l1FetcherLogic:   logic.NewL1FetcherLogic(cfg, db, client),
+		l1FetcherLogic:   logic.NewL1FetcherLogic(cfg, db, client, blobClient),
 	}
 
 	reg := prometheus.DefaultRegisterer
@@ -58,7 +79,7 @@ func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
 		Help: "Latest blockchain height the L1 message fetcher has synced with.",
 	})
 
-	return c
+	return c, nil
 }
 
 // Start starts the L1 message fetching process.
```
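The constructor above aggregates whichever blob backends are configured (beacon node, BlobScan, BlockNative) behind a single client and fails fast when none is available. As a reading aid, here is a minimal sketch of that first-success fallback pattern; the interface shape follows the `rollup/da_syncer/blob_client` package used in the diff, but the aggregate type, the `Blob` stand-in, and the names below are illustrative assumptions, not the upstream implementation.

```go
// Package blobclient sketches the multi-backend fallback used by
// NewL1MessageFetcher above. Illustrative only; the real types live in
// scroll-tech/go-ethereum's rollup/da_syncer/blob_client package.
package blobclient

import (
	"context"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

// Blob is a stand-in for the upstream KZG blob type (assumption).
type Blob []byte

// BlobClient mirrors the single method the fetcher needs from a backend.
type BlobClient interface {
	GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (Blob, error)
}

// BlobClients tries each configured backend in order and returns the first hit.
type BlobClients struct {
	clients []BlobClient
}

// AddBlobClient registers another backend to try.
func (c *BlobClients) AddBlobClient(client BlobClient) { c.clients = append(c.clients, client) }

// Size reports how many backends are configured; the constructor above
// refuses to start when this is zero.
func (c *BlobClients) Size() int { return len(c.clients) }

// GetBlobByVersionedHashAndBlockTime walks the backends in registration
// order and returns the first successful response.
func (c *BlobClients) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (Blob, error) {
	if len(c.clients) == 0 {
		return nil, fmt.Errorf("no blob client is configured")
	}
	var lastErr error
	for _, client := range c.clients {
		blob, err := client.GetBlobByVersionedHashAndBlockTime(ctx, versionedHash, blockTime)
		if err == nil {
			return blob, nil
		}
		lastErr = err // remember the failure and fall through to the next backend
	}
	return nil, fmt.Errorf("no blob client could fetch %s: %w", versionedHash.Hex(), lastErr)
}
```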
```diff
@@ -2,13 +2,16 @@ package logic
 
 import (
 	"context"
+	"fmt"
 	"math/big"
 
+	"github.com/scroll-tech/da-codec/encoding"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
 
 	backendabi "scroll-tech/bridge-history-api/abi"
 	"scroll-tech/bridge-history-api/internal/config"
@@ -19,15 +22,17 @@
 
 // L1EventParser the l1 event parser
 type L1EventParser struct {
-	cfg    *config.FetcherConfig
-	client *ethclient.Client
+	cfg        *config.FetcherConfig
+	client     *ethclient.Client
+	blobClient blob_client.BlobClient
 }
 
 // NewL1EventParser creates l1 event parser
-func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1EventParser {
+func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client, blobClient blob_client.BlobClient) *L1EventParser {
 	return &L1EventParser{
-		cfg:    cfg,
-		client: client,
+		cfg:        cfg,
+		client:     client,
+		blobClient: blobClient,
 	}
 }
@@ -232,7 +237,21 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
 }
 
 // ParseL1BatchEventLogs parses L1 watched batch events.
-func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client) ([]*orm.BatchEvent, error) {
+func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client, blockTimestampsMap map[uint64]uint64) ([]*orm.BatchEvent, error) {
+	// Since multiple CommitBatch events per transaction is introduced >= CodecV7,
+	// with one transaction carrying multiple blobs,
+	// each CommitBatch event corresponds to a blob containing block range data.
+	// To correctly process these events, we need to:
+	// 1. Parsing the associated blob data to extract the block range for each event
+	// 2. Tracking the parent batch hash for each processed CommitBatch event, to:
+	//    - Validate the batch hash, since parent batch hash is needed to calculate the batch hash
+	//    - Derive the index of the current batch by the number of parent batch hashes tracked
+	// In commitBatches and commitAndFinalizeBatch, the parent batch hash is passed in calldata,
+	// so that we can use it to get the first batch's parent batch hash, and derive the rest.
+	// The index map serves this purpose with:
+	// Key: commit transaction hash
+	// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
+	txBlobIndexMap := make(map[common.Hash][]common.Hash)
 	var l1BatchEvents []*orm.BatchEvent
 	for _, vlog := range logs {
 		switch vlog.Topics[0] {
@@ -247,11 +266,59 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
 				return nil, err
 			}
-			startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
+			version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
 			if err != nil {
 				log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
 				return nil, err
 			}
+			if version >= 7 { // It's a batch with version >= 7.
+				codec, err := encoding.CodecFromVersion(encoding.CodecVersion(version))
+				if err != nil {
+					return nil, fmt.Errorf("unsupported codec version: %v, err: %w", version, err)
+				}
+
+				// we append the batch hash to the slice for the current commit transaction after processing the batch.
+				// that means the current index of the batch within the transaction is len(txBlobIndexMap[vlog.TxHash]).
+				currentIndex := len(txBlobIndexMap[vlog.TxHash])
+				if currentIndex >= len(commitTx.BlobHashes()) {
+					return nil, fmt.Errorf("commit transaction %s has %d blobs, but trying to access index %d (batch index %d)",
+						vlog.TxHash.String(), len(commitTx.BlobHashes()), currentIndex, event.BatchIndex.Uint64())
+				}
+				blobVersionedHash := commitTx.BlobHashes()[currentIndex]
+
+				// validate the batch hash
+				var parentBatchHash common.Hash
+				if currentIndex == 0 {
+					parentBatchHash, err = utils.GetParentBatchHashFromCalldata(commitTx.Data())
+					if err != nil {
+						return nil, fmt.Errorf("failed to get parent batch header from calldata, tx hash: %s, err: %w", vlog.TxHash.String(), err)
+					}
+				} else {
+					// here we need to subtract 1 from the current index to get the parent batch hash.
+					parentBatchHash = txBlobIndexMap[vlog.TxHash][currentIndex-1]
+				}
+				calculatedBatch, err := codec.NewDABatchFromParams(event.BatchIndex.Uint64(), blobVersionedHash, parentBatchHash)
+				if err != nil {
+					return nil, fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", event.BatchIndex.Uint64(), err)
+				}
+				if calculatedBatch.Hash() != event.BatchHash {
+					return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
+				}
+
+				blocks, err := e.getBatchBlockRangeFromBlob(ctx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
+				if err != nil {
+					return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
+						blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
+				}
+				if len(blocks) == 0 {
+					return nil, fmt.Errorf("no blocks found in the blob, blobVersionedHash: %s, block number: %d, blob index: %d",
+						blobVersionedHash.String(), vlog.BlockNumber, currentIndex)
+				}
+				startBlock = blocks[0].Number()
+				endBlock = blocks[len(blocks)-1].Number()
+
+				txBlobIndexMap[vlog.TxHash] = append(txBlobIndexMap[vlog.TxHash], event.BatchHash)
+			}
 			l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
 				BatchStatus:      int(btypes.BatchStatusTypeCommitted),
 				BatchIndex:       event.BatchIndex.Uint64(),
@@ -260,8 +327,8 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				EndBlockNumber:   endBlock,
 				L1BlockNumber:    vlog.BlockNumber,
 			})
-		case backendabi.L1RevertBatchEventSig:
-			event := backendabi.L1RevertBatchEvent{}
+		case backendabi.L1RevertBatchV0EventSig:
+			event := backendabi.L1RevertBatchV0Event{}
 			if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
 				log.Error("Failed to unpack RevertBatch event", "err", err)
 				return nil, err
@@ -272,6 +339,19 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
 				BatchHash:     event.BatchHash.String(),
 				L1BlockNumber: vlog.BlockNumber,
 			})
+		case backendabi.L1RevertBatchV7EventSig:
+			event := backendabi.L1RevertBatchV7Event{}
+			if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch0", vlog); err != nil {
+				log.Error("Failed to unpack RevertBatch event", "err", err)
+				return nil, err
+			}
+			for i := event.StartBatchIndex.Uint64(); i <= event.FinishBatchIndex.Uint64(); i++ {
+				l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
+					BatchStatus:   int(btypes.BatchStatusTypeReverted),
+					BatchIndex:    i,
+					L1BlockNumber: vlog.BlockNumber,
+				})
+			}
 		case backendabi.L1FinalizeBatchEventSig:
 			event := backendabi.L1FinalizeBatchEvent{}
 			if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
@@ -389,3 +469,27 @@ func getRealFromAddress(ctx context.Context, eventSender common.Address, eventMe
 	}
 	return sender.String(), nil
 }
+
+func (e *L1EventParser) getBatchBlockRangeFromBlob(ctx context.Context, codec encoding.Codec, blobVersionedHash common.Hash, l1BlockTime uint64) ([]encoding.DABlock, error) {
+	blob, err := e.blobClient.GetBlobByVersionedHashAndBlockTime(ctx, blobVersionedHash, l1BlockTime)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get blob %s: %w", blobVersionedHash.Hex(), err)
+	}
+	if blob == nil {
+		return nil, fmt.Errorf("blob %s not found", blobVersionedHash.Hex())
+	}
+
+	blobPayload, err := codec.DecodeBlob(blob)
+	if err != nil {
+		return nil, fmt.Errorf("blob %s decode error: %w", blobVersionedHash.Hex(), err)
+	}
+
+	blocks := blobPayload.Blocks()
+	if len(blocks) == 0 {
+		return nil, fmt.Errorf("empty blocks in blob %s", blobVersionedHash.Hex())
+	}
+
+	log.Debug("Successfully processed blob", "blobVersionedHash", blobVersionedHash.Hex(), "blocksCount", len(blocks))
+
+	return blocks, nil
+}
```
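The long comment in `ParseL1BatchEventLogs` above boils down to a small bookkeeping rule: within one commit transaction, the first CommitBatch event takes its parent batch hash from calldata, and every later event takes the batch hash of the event before it. Below is a stripped-down sketch of that derivation, assuming events arrive in emission order; the types and helper are illustrative, not the repo's API.

```go
// Package parenthash sketches the txBlobIndexMap bookkeeping described in
// the ParseL1BatchEventLogs comment above. Illustrative only.
package parenthash

import "github.com/scroll-tech/go-ethereum/common"

// CommitEvent pairs a CommitBatch event with the transaction that emitted it.
type CommitEvent struct {
	TxHash    common.Hash // commit transaction carrying the blob(s)
	BatchHash common.Hash // batch hash emitted by the event
}

// DeriveParents returns the parent batch hash for each event: the calldata
// parent for a transaction's first event, then the previous event's batch
// hash for every later event of the same transaction.
func DeriveParents(events []CommitEvent, calldataParent map[common.Hash]common.Hash) []common.Hash {
	seen := make(map[common.Hash][]common.Hash) // tx hash -> batch hashes processed so far
	parents := make([]common.Hash, 0, len(events))
	for _, ev := range events {
		prior := seen[ev.TxHash]
		if len(prior) == 0 {
			// First batch of this transaction: parent comes from commitBatches /
			// commitAndFinalizeBatch calldata (GetParentBatchHashFromCalldata in the diff).
			parents = append(parents, calldataParent[ev.TxHash])
		} else {
			// Later batches chain onto the previous event's hash.
			parents = append(parents, prior[len(prior)-1])
		}
		seen[ev.TxHash] = append(seen[ev.TxHash], ev.BatchHash)
	}
	return parents
}
```

Note that `len(seen[tx])` at each step doubles as the event's blob index within its transaction, which is how the parser picks the matching entry from `commitTx.BlobHashes()`.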
```diff
@@ -11,6 +11,7 @@ import (
 	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
 	"gorm.io/gorm"
 
 	backendabi "scroll-tech/bridge-history-api/abi"
@@ -49,7 +50,7 @@ type L1FetcherLogic struct {
 }
 
 // NewL1FetcherLogic creates L1 fetcher logic
-func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
+func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client, blobClient blob_client.BlobClient) *L1FetcherLogic {
 	addressList := []common.Address{
 		common.HexToAddress(cfg.StandardERC20GatewayAddr),
 		common.HexToAddress(cfg.CustomERC20GatewayAddr),
@@ -119,6 +120,10 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 		gatewayList = append(gatewayList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
 	}
 
+	if common.HexToAddress(cfg.MessageQueueV2Addr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.MessageQueueV2Addr))
+	}
+
 	log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
 
 	f := &L1FetcherLogic{
@@ -129,7 +134,7 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 		client:      client,
 		addressList: addressList,
 		gatewayList: gatewayList,
-		parser:      NewL1EventParser(cfg, client),
+		parser:      NewL1EventParser(cfg, client, blobClient),
 	}
 
 	reg := prometheus.DefaultRegisterer
@@ -168,14 +173,10 @@ func (f *L1FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to u
 	return false, 0, lastBlockHash, blocks, nil
 }
 
-func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, error) {
+func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) ([]*orm.CrossMessage, error) {
 	var l1RevertedTxs []*orm.CrossMessage
-	blockTimestampsMap := make(map[uint64]uint64)
 
 	for i := from; i <= to; i++ {
 		block := blocks[i-from]
-		blockTimestampsMap[block.NumberU64()] = block.Time()
 
 		for _, tx := range block.Transactions() {
 			// Gateways: L1 deposit.
 			// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
@@ -187,7 +188,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
 			receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
 			if receiptErr != nil {
 				log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
-				return nil, nil, receiptErr
+				return nil, receiptErr
 			}
 
 			// Check if the transaction is failed
@@ -199,7 +200,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
 			sender, senderErr := signer.Sender(tx)
 			if senderErr != nil {
 				log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", senderErr)
-				return nil, nil, senderErr
+				return nil, senderErr
 			}
 
 			l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
@@ -213,7 +214,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
 			})
 		}
 	}
-	return blockTimestampsMap, l1RevertedTxs, nil
+	return l1RevertedTxs, nil
 }
 
 func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
@@ -224,7 +225,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
 		Topics:    make([][]common.Hash, 1),
 	}
 
-	query.Topics[0] = make([]common.Hash, 16)
+	query.Topics[0] = make([]common.Hash, 17)
 	query.Topics[0][0] = backendabi.L1DepositETHSig
 	query.Topics[0][1] = backendabi.L1DepositERC20Sig
 	query.Topics[0][2] = backendabi.L1DepositERC721Sig
@@ -233,14 +234,15 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
 	query.Topics[0][5] = backendabi.L1RelayedMessageEventSig
 	query.Topics[0][6] = backendabi.L1FailedRelayedMessageEventSig
 	query.Topics[0][7] = backendabi.L1CommitBatchEventSig
-	query.Topics[0][8] = backendabi.L1RevertBatchEventSig
-	query.Topics[0][9] = backendabi.L1FinalizeBatchEventSig
-	query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
-	query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
-	query.Topics[0][12] = backendabi.L1DropTransactionEventSig
-	query.Topics[0][13] = backendabi.L1ResetDequeuedTransactionEventSig
-	query.Topics[0][14] = backendabi.L1BridgeBatchDepositSig
-	query.Topics[0][15] = backendabi.L1DepositWrappedTokenSig
+	query.Topics[0][8] = backendabi.L1RevertBatchV0EventSig
+	query.Topics[0][9] = backendabi.L1RevertBatchV7EventSig
+	query.Topics[0][10] = backendabi.L1FinalizeBatchEventSig
+	query.Topics[0][11] = backendabi.L1QueueTransactionEventSig
+	query.Topics[0][12] = backendabi.L1DequeueTransactionEventSig
+	query.Topics[0][13] = backendabi.L1DropTransactionEventSig
+	query.Topics[0][14] = backendabi.L1ResetDequeuedTransactionEventSig
+	query.Topics[0][15] = backendabi.L1BridgeBatchDepositSig
+	query.Topics[0][16] = backendabi.L1DepositWrappedTokenSig
 
 	eventLogs, err := f.client.FilterLogs(ctx, query)
 	if err != nil {
@@ -264,12 +266,18 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
 		return isReorg, reorgHeight, blockHash, nil, nil
 	}
 
-	blockTimestampsMap, l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
+	l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
 	if err != nil {
 		log.Error("L1Fetcher getRevertedTxs failed", "from", from, "to", to, "error", err)
 		return false, 0, common.Hash{}, nil, err
 	}
 
+	// Map block number to block timestamp to avoid fetching block header multiple times to get block timestamp.
+	blockTimestampsMap := make(map[uint64]uint64)
+	for _, block := range blocks {
+		blockTimestampsMap[block.NumberU64()] = block.Time()
+	}
+
 	eventLogs, err := f.l1FetcherLogs(ctx, from, to)
 	if err != nil {
 		log.Error("L1Fetcher l1FetcherLogs failed", "from", from, "to", to, "error", err)
@@ -282,7 +290,7 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
 		return false, 0, common.Hash{}, nil, err
 	}
 
-	l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client)
+	l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client, blockTimestampsMap)
 	if err != nil {
 		log.Error("failed to parse L1 batch event logs", "from", from, "to", to, "err", err)
 		return false, 0, common.Hash{}, nil, err
```
```diff
@@ -117,7 +117,7 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
 		}
 	case btypes.BatchStatusTypeReverted:
 		db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
-		db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
 		db = db.Where("batch_status != ?", btypes.BatchStatusTypeFinalized)
 		updateFields["batch_status"] = btypes.BatchStatusTypeReverted
 		if err := db.Updates(updateFields).Error; err != nil {
 			return fmt.Errorf("failed to update batch event, error: %w", err)
```
```diff
@@ -154,7 +154,7 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
 	db := c.db.WithContext(ctx)
 	db = db.Model(&CrossMessage{})
 	db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
-	db = db.Where("tx_status = ?", types.TxStatusTypeSent)
+	db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
 	db = db.Where("sender = ?", sender)
 	db = db.Order("block_timestamp desc")
 	db = db.Limit(500)
```
```diff
@@ -66,25 +66,26 @@ func ComputeMessageHash(
 	return common.BytesToHash(crypto.Keccak256(data))
 }
 
-// GetBatchRangeFromCalldata find the block range from calldata, both inclusive.
-func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
+// GetBatchVersionAndBlockRangeFromCalldata find the block range from calldata, both inclusive.
+func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uint64, error) {
 	const methodIDLength = 4
 	if len(txData) < methodIDLength {
-		return 0, 0, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
+		return 0, 0, 0, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
 	}
 	method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
 	if err != nil {
-		return 0, 0, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
+		return 0, 0, 0, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
 	}
 	values, err := method.Inputs.Unpack(txData[methodIDLength:])
 	if err != nil {
-		return 0, 0, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
+		return 0, 0, 0, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
 	}
 
 	var chunks [][]byte
+	var version uint8
 
 	if method.Name == "importGenesisBatch" {
-		return 0, 0, nil
+		return 0, 0, 0, nil
 	} else if method.Name == "commitBatch" {
 		type commitBatchArgs struct {
 			Version uint8
@@ -95,11 +96,11 @@ func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
 
 		var args commitBatchArgs
 		if err = method.Inputs.Copy(&args, values); err != nil {
-			return 0, 0, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
+			return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
 		}
 
 		chunks = args.Chunks
+		version = args.Version
 	} else if method.Name == "commitBatchWithBlobProof" {
 		type commitBatchWithBlobProofArgs struct {
 			Version uint8
@@ -111,10 +112,22 @@ func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
 
 		var args commitBatchWithBlobProofArgs
 		if err = method.Inputs.Copy(&args, values); err != nil {
-			return 0, 0, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
+			return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
 		}
 
 		chunks = args.Chunks
+		version = args.Version
+	} else if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
+		if len(values) < 3 {
+			return 0, 0, 0, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
+		}
+
+		var ok bool
+		version, ok = values[0].(uint8)
+		if !ok {
+			return 0, 0, 0, fmt.Errorf("invalid version type: %T", values[0])
+		}
+		return version, 0, 0, nil
 	}
 
 	var startBlock uint64
@@ -124,7 +137,7 @@ func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
 	// | 1 byte     | 60 bytes | ... | 60 bytes |
 	// | num blocks | block 1  | ... | block n  |
 	if len(chunks) == 0 {
-		return 0, 0, errors.New("invalid chunks")
+		return 0, 0, 0, errors.New("invalid chunks")
 	}
 	chunk := chunks[0]
 	block := chunk[1:61] // first block in chunk
@@ -135,7 +148,36 @@ func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
 	block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
 	finishBlock = binary.BigEndian.Uint64(block[0:8])
 
-	return startBlock, finishBlock, err
+	return version, startBlock, finishBlock, err
 }
+
+// GetParentBatchHashFromCalldata gets the parent batch hash from calldata.
+// It only supports commitBatches and commitAndFinalizeBatch, which only accept batches >= v7.
+func GetParentBatchHashFromCalldata(txData []byte) (common.Hash, error) {
+	const methodIDLength = 4
+	if len(txData) < methodIDLength {
+		return common.Hash{}, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
+	}
+	method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
+	if err != nil {
+		return common.Hash{}, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
+	}
+	values, err := method.Inputs.Unpack(txData[methodIDLength:])
+	if err != nil {
+		return common.Hash{}, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
+	}
+
+	if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
+		if len(values) < 3 {
+			return common.Hash{}, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
+		}
+		parentBatchHash, ok := values[1].([32]byte)
+		if !ok {
+			return common.Hash{}, fmt.Errorf("invalid parentBatchHash type: %T", values[1])
+		}
+		return common.BytesToHash(parentBatchHash[:]), nil
+	}
+	return common.Hash{}, fmt.Errorf("method %s does not support parent batch header", method.Name)
+}
 
 // GetBlocksInRange gets a batch of blocks for a block range [start, end] inclusive.
```
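The comment above documents the pre-V7 chunk layout in commit calldata: one length byte, then fixed 60-byte block contexts whose first eight bytes are the big-endian block number. Here is a small worked sketch of decoding that layout, under those assumptions only; the package and helper names are hypothetical, not the repo's API.

```go
// Package chunkdec sketches the pre-V7 chunk layout referenced above:
// | 1 byte     | 60 bytes | ... | 60 bytes |
// | num blocks | block 1  | ... | block n  |
// The first 8 bytes of each 60-byte block context hold the block number.
package chunkdec

import (
	"encoding/binary"
	"errors"
)

const blockContextSize = 60

// ChunkBlockRange returns the first and last block numbers encoded in a chunk.
func ChunkBlockRange(chunk []byte) (uint64, uint64, error) {
	if len(chunk) == 0 {
		return 0, 0, errors.New("empty chunk")
	}
	numBlocks := int(chunk[0])
	if numBlocks == 0 || len(chunk) < 1+numBlocks*blockContextSize {
		return 0, 0, errors.New("invalid chunk: truncated block contexts")
	}
	first := binary.BigEndian.Uint64(chunk[1 : 1+8]) // first block in chunk
	lastOffset := 1 + (numBlocks-1)*blockContextSize
	last := binary.BigEndian.Uint64(chunk[lastOffset : lastOffset+8]) // last block in chunk
	return first, last, nil
}
```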
File diff suppressed because one or more lines are too long
```diff
@@ -91,7 +91,7 @@ linters-settings:
     #local-prefixes: github.com/org/project
   gocyclo:
     # minimal code complexity to report, 30 by default (but we recommend 10-20)
-    min-complexity: 30
+    min-complexity: 40
   maligned:
     # print struct with more effective memory layout or not, false by default
     suggest-new: true
@@ -254,6 +254,9 @@ issues:
     - linters:
         - wsl
       text: "expressions should not be cuddled with declarations or returns"
+    - linters:
+        - govet
+      text: 'shadow: declaration of "(err|ctx)" shadows declaration at'
 
   # Independently from option `exclude` we use default exclude patterns,
   # it can be disabled by this option. To list all
```
build/dockerfiles/blob_uploader.Dockerfile (new file, 30 lines)

```diff
@@ -0,0 +1,30 @@
+# Download Go dependencies
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
+
+WORKDIR /src
+COPY go.work* ./
+COPY ./rollup/go.* ./rollup/
+COPY ./common/go.* ./common/
+COPY ./coordinator/go.* ./coordinator/
+COPY ./database/go.* ./database/
+COPY ./tests/integration-test/go.* ./tests/integration-test/
+COPY ./bridge-history-api/go.* ./bridge-history-api/
+RUN go mod download -x
+
+# Build blob_uploader
+FROM base as builder
+
+RUN --mount=target=. \
+    --mount=type=cache,target=/root/.cache/go-build \
+    cd /src/rollup/cmd/blob_uploader/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/blob_uploader
+
+# Pull blob_uploader into a second stage deploy ubuntu container
+FROM ubuntu:20.04
+
+RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
+
+ENV CGO_LDFLAGS="-ldl"
+
+COPY --from=builder /bin/blob_uploader /bin/
+WORKDIR /app
+ENTRYPOINT ["blob_uploader"]
```
build/dockerfiles/blob_uploader.Dockerfile.dockerignore (new file, 5 lines)

```diff
@@ -0,0 +1,5 @@
+assets/
+docs/
+l2geth/
+rpc-gateway/
+*target/*
```
```diff
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
 
 WORKDIR /src
 COPY go.mod* ./
```
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM golang:1.21-alpine3.19 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY ./bridge-history-api/go.* ./
@@ -10,10 +10,11 @@ FROM base as builder

 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
-    cd /src/bridge-history-api/cmd/db_cli && go build -v -p 4 -o /bin/db_cli
+    cd /src/bridge-history-api/cmd/db_cli && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli

-# Pull db_cli into a second stage deploy alpine container
-FROM alpine:latest
+# Pull db_cli into a second stage deploy ubuntu container
+FROM ubuntu:20.04
+ENV CGO_LDFLAGS="-ldl"
 COPY --from=builder /bin/db_cli /bin/
 WORKDIR /app
 ENTRYPOINT ["db_cli"]
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY go.mod* ./
@@ -1,5 +1,5 @@
 # Build libzkp dependency
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
+FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03 as chef
 WORKDIR app

 FROM chef as planner
@@ -9,6 +9,12 @@ RUN cargo chef prepare --recipe-path recipe.json
 FROM chef as zkp-builder
 COPY ./common/libzkp/impl/rust-toolchain ./
 COPY --from=planner /app/recipe.json recipe.json
+# run scripts to get openvm-gpu
+COPY ./build/dockerfiles/coordinator-api/plonky3-gpu /plonky3-gpu
+COPY ./build/dockerfiles/coordinator-api/openvm-stark-gpu /openvm-stark-gpu
+COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
+COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
+COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
 RUN cargo chef cook --release --recipe-path recipe.json

 COPY ./common/libzkp/impl .
@@ -16,7 +22,7 @@ RUN cargo build --release


 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/cuda-go-rust-builder:cuda-11.7.1-go-1.21-rust-nightly-2023-12-03 as base
 WORKDIR /src
 COPY go.work* ./
 COPY ./rollup/go.* ./rollup/
@@ -36,7 +42,7 @@ COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/log
 RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/

 # Pull coordinator into a second stage deploy ubuntu container
-FROM ubuntu:20.04
+FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04
 ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
 ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
 # ENV CHAIN_ID=534353
17  build/dockerfiles/coordinator-api/checkout_all.sh  Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
set -uex

PLONKY3_GPU_COMMIT=261b322 # v0.2.0
OPENVM_STARK_GPU_COMMIT=3082234 # PR#48
OPENVM_GPU_COMMIT=8094b4f # branch: patch-v1.2.0

DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)

# checkout plonky3-gpu
cd $DIR/plonky3-gpu && git checkout ${PLONKY3_GPU_COMMIT}

# checkout openvm-stark-gpu
cd $DIR/openvm-stark-gpu && git checkout ${OPENVM_STARK_GPU_COMMIT}

# checkout openvm-gpu
cd $DIR/openvm-gpu && git checkout ${OPENVM_GPU_COMMIT}
10  build/dockerfiles/coordinator-api/clone_openvm_gpu.sh  Executable file
@@ -0,0 +1,10 @@
#!/bin/bash
set -uex

DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)

# clone openvm-gpu if not exists
if [ ! -d $DIR/openvm-gpu ]; then
    git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch --all --force
10  build/dockerfiles/coordinator-api/clone_openvm_stark_gpu.sh  Executable file
@@ -0,0 +1,10 @@
#!/bin/bash
set -uex

DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)

# clone openvm-stark-gpu if not exists
if [ ! -d $DIR/openvm-stark-gpu ]; then
    git clone git@github.com:scroll-tech/openvm-stark-gpu.git $DIR/openvm-stark-gpu
fi
cd $DIR/openvm-stark-gpu && git fetch --all --force
10  build/dockerfiles/coordinator-api/clone_plonky3_gpu.sh  Executable file
@@ -0,0 +1,10 @@
#!/bin/bash
set -uex

DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)

# clone plonky3-gpu if not exists
if [ ! -d $DIR/plonky3-gpu ]; then
    git clone git@github.com:scroll-tech/plonky3-gpu.git $DIR/plonky3-gpu
fi
cd $DIR/plonky3-gpu && git fetch --all --force
92  build/dockerfiles/coordinator-api/config.toml  Normal file
@@ -0,0 +1,92 @@
# openvm
# same order and features as zkvm-prover/Cargo.toml.gpu
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
openvm-instructions = { path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics", "evm-prove"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }

# stark-backend
[patch."https://github.com/openvm-org/stark-backend.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }

[patch."ssh://git@github.com/scroll-tech/openvm-stark-gpu.git"]
openvm-stark-backend = { path = "/openvm-stark-gpu/crates/stark-backend", features = ["gpu"] }
openvm-stark-sdk = { path = "/openvm-stark-gpu/crates/stark-sdk", features = ["gpu"] }

# plonky3
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }

# gpu crates
[patch."ssh://git@github.com/scroll-tech/plonky3-gpu.git"]
p3-gpu-base = { path = "/plonky3-gpu/gpu-base" }
p3-gpu-build = { path = "/plonky3-gpu/gpu-build" }
p3-gpu-field = { path = "/plonky3-gpu/gpu-field" }
p3-gpu-backend = { path = "/plonky3-gpu/gpu-backend" }
p3-gpu-module = { path = "/plonky3-gpu/gpu-module" }
p3-air = { path = "/plonky3-gpu/air" }
p3-field = { path = "/plonky3-gpu/field" }
p3-commit = { path = "/plonky3-gpu/commit" }
p3-matrix = { path = "/plonky3-gpu/matrix" }
p3-baby-bear = { path = "/plonky3-gpu/baby-bear" }
p3-koala-bear = { path = "/plonky3-gpu/koala-bear" }
p3-util = { path = "/plonky3-gpu/util" }
p3-challenger = { path = "/plonky3-gpu/challenger" }
p3-dft = { path = "/plonky3-gpu/dft" }
p3-fri = { path = "/plonky3-gpu/fri" }
p3-goldilocks = { path = "/plonky3-gpu/goldilocks" }
p3-keccak = { path = "/plonky3-gpu/keccak" }
p3-keccak-air = { path = "/plonky3-gpu/keccak-air" }
p3-blake3 = { path = "/plonky3-gpu/blake3" }
p3-mds = { path = "/plonky3-gpu/mds" }
p3-monty-31 = { path = "/plonky3-gpu/monty-31" }
p3-merkle-tree = { path = "/plonky3-gpu/merkle-tree" }
p3-poseidon = { path = "/plonky3-gpu/poseidon" }
p3-poseidon2 = { path = "/plonky3-gpu/poseidon2" }
p3-poseidon2-air = { path = "/plonky3-gpu/poseidon2-air" }
p3-symmetric = { path = "/plonky3-gpu/symmetric" }
p3-uni-stark = { path = "/plonky3-gpu/uni-stark" }
p3-maybe-rayon = { path = "/plonky3-gpu/maybe-rayon" }
p3-bn254-fr = { path = "/plonky3-gpu/bn254-fr" }
2  build/dockerfiles/coordinator-api/gitconfig  Normal file
@@ -0,0 +1,2 @@
[url "https://github.com/"]
    insteadOf = ssh://git@github.com/
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY go.work* ./
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-alpine-builder:1.21 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY go.work* ./
@@ -16,10 +16,11 @@ FROM base as builder

 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
-    cd /src/database/cmd && go build -v -p 4 -o /bin/db_cli
+    cd /src/database/cmd && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli

-# Pull db_cli into a second stage deploy alpine container
-FROM alpine:latest
+# Pull db_cli into a second stage deploy ubuntu container
+FROM ubuntu:20.04
+ENV CGO_LDFLAGS="-ldl"
 COPY --from=builder /bin/db_cli /bin/
 WORKDIR /app
 ENTRYPOINT ["db_cli"]
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY go.work* ./
@@ -1,5 +1,5 @@
 ARG CUDA_VERSION=11.7.1
-ARG GO_VERSION=1.21
+ARG GO_VERSION=1.22.12
 ARG RUST_VERSION=nightly-2023-12-03
 ARG CARGO_CHEF_TAG=0.1.41

@@ -36,7 +36,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
     else \
         echo "Unsupported architecture"; exit 1; \
     fi
-RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
-RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
-RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
+RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
+RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
+RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
 ENV PATH="/usr/local/go/bin:${PATH}"
@@ -1,4 +1,4 @@
-ARG GO_VERSION=1.21
+ARG GO_VERSION=1.22.12
 ARG RUST_VERSION=nightly-2023-12-03
 ARG CARGO_CHEF_TAG=0.1.41

@@ -32,7 +32,7 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
     else \
         echo "Unsupported architecture"; exit 1; \
     fi
-RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
-RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz
-RUN rm go${GO_VERSION}.1.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
+RUN wget https://go.dev/dl/go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
+RUN tar -C /usr/local -xzf go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz
+RUN rm go${GO_VERSION}.linux-$(cat /tmp/arch).tar.gz && rm /tmp/arch
 ENV PATH="/usr/local/go/bin:${PATH}"
23  build/dockerfiles/prover.Dockerfile  Normal file
@@ -0,0 +1,23 @@
FROM ubuntu:24.04 AS builder

RUN apt-get update -y && apt-get upgrade -y

# Install basic packages
RUN apt-get install build-essential curl wget git pkg-config -y
# Install dev-packages
RUN apt-get install libclang-dev libssl-dev llvm -y

# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo

COPY . /src

RUN cd /src/zkvm-prover && make prover

FROM ubuntu:24.04 AS runtime

COPY --from=builder /src/zkvm-prover/target/release/prover /usr/local/bin/

ENTRYPOINT ["prover"]
5  build/dockerfiles/prover.dockerignore  Normal file
@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
+FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

 WORKDIR /src
 COPY go.work* ./
@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
 func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
 	elapsed := time.Since(begin)
 	sql, rowsAffected := fc()
-	g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
+	g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
 }

 // InitDB init the db handler
@@ -15,8 +15,8 @@ require (
 	github.com/modern-go/reflect2 v1.0.2
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e
-	github.com/stretchr/testify v1.9.0
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
+	github.com/stretchr/testify v1.10.0
 	github.com/testcontainers/testcontainers-go v0.30.0
 	github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
 	github.com/testcontainers/testcontainers-go/modules/postgres v0.30.0
@@ -55,8 +55,8 @@ require (
 	github.com/chenzhuoyu/iasm v0.9.0 // indirect
 	github.com/cloudflare/cfssl v1.6.5 // indirect
 	github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 // indirect
-	github.com/consensys/bavard v0.1.25 // indirect
-	github.com/consensys/gnark-crypto v0.14.0 // indirect
+	github.com/consensys/bavard v0.1.29 // indirect
+	github.com/consensys/gnark-crypto v0.16.0 // indirect
 	github.com/containerd/console v1.0.3 // indirect
 	github.com/containerd/containerd v1.7.12 // indirect
 	github.com/containerd/continuity v0.4.2 // indirect
@@ -184,7 +184,7 @@ require (
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rs/cors v1.7.0 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5 // indirect
+	github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 // indirect
 	github.com/scroll-tech/zktrie v0.8.4 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
 	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
@@ -240,8 +240,8 @@ require (
 	golang.org/x/mod v0.17.0 // indirect
 	golang.org/x/net v0.25.0 // indirect
 	golang.org/x/oauth2 v0.16.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.29.0 // indirect
+	golang.org/x/sync v0.11.0 // indirect
+	golang.org/x/sys v0.30.0 // indirect
 	golang.org/x/term v0.28.0 // indirect
 	golang.org/x/text v0.21.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
@@ -121,10 +121,10 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo
 github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
 github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 h1:NlkpaLBPFr05mNJWVMH7PP4L30gFG6k4z1QpypLUSh8=
 github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60/go.mod h1:bEPizBkIojlQ20pi2vNluBa58tevvj0Y18oUSHPyfdc=
-github.com/consensys/bavard v0.1.25 h1:5YcSBnp03/HvfpKaIQLr/ecspTp2k8YNR5rQLOWvUyc=
-github.com/consensys/bavard v0.1.25/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
-github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E=
-github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0=
+github.com/consensys/bavard v0.1.29 h1:fobxIYksIQ+ZSrTJUuQgu+HIJwclrAPcdXqd7H2hh1k=
+github.com/consensys/bavard v0.1.29/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
+github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
+github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
 github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
 github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
 github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
@@ -636,10 +636,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5 h1:vZ75srkZCStjDWq/kqZGLoucf7Y7qXC13nKjQVZ0zp8=
-github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e h1:g8jtcGiHbjWYh/V7O245IDho3WfQT4CwEpBV+MhYDrg=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
+github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
+github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
@@ -705,8 +705,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
 github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -873,8 +873,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -916,8 +916,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
7668  common/libzkp/impl/Cargo.lock  generated
File diff suppressed because it is too large
@@ -8,26 +8,14 @@ edition = "2021"
 crate-type = ["cdylib"]

 [patch.crates-io]
-gobuild = { git = "https://github.com/scroll-tech/gobuild.git" }
-halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
-ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
-ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
-ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
-[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
-halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
-[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
-poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
-[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
-bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
+# patched add rkyv support & MSRV 1.77
+alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.21" }
+ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
+tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }

 [dependencies]
-halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
-snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
-
-# darwin
-prover_v4 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.2", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
-# darwin_v2
-prover_v5 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.13.1", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
+euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.4.2", package = "scroll-zkvm-prover" }
+euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.4.2", package = "scroll-zkvm-verifier" }

 base64 = "0.13.0"
 env_logger = "0.9.0"
@@ -44,3 +32,35 @@ opt-level = 3

 [profile.release]
 opt-level = 3
+
+[patch."https://github.com/openvm-org/stark-backend.git"]
+openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
+openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
+
+[patch."https://github.com/Plonky3/Plonky3.git"]
+p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
+    "nightly-features",
+], tag = "v0.2.0" }
+p3-koala-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
+p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
+p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.2.0" }
@@ -1 +1 @@
-nightly-2023-12-03
+nightly-2024-12-06
@@ -1,16 +1,15 @@
 mod utils;
 mod verifier;

+use std::path::Path;
+
 use crate::utils::{c_char_to_str, c_char_to_vec};
 use libc::c_char;
-use prover_v5::utils::init_env_and_log;
 use verifier::{TaskType, VerifierConfig};

 /// # Safety
 #[no_mangle]
 pub unsafe extern "C" fn init(config: *const c_char) {
-    init_env_and_log("ffi_init");
-
     let config_str = c_char_to_str(config);
     let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
     verifier::init(verifier_config);
@@ -26,9 +25,8 @@ pub unsafe extern "C" fn verify_chunk_proof(
 }

 fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
-    let proof = c_char_to_vec(proof);
-
     let fork_name_str = c_char_to_str(fork_name);
+    let proof = c_char_to_vec(proof);
     let verifier = verifier::get_verifier(fork_name_str);

     if let Err(e) = verifier {
@@ -61,3 +59,18 @@ pub unsafe extern "C" fn verify_bundle_proof(
 ) -> c_char {
     verify_proof(proof, fork_name, TaskType::Bundle)
 }
+
+/// # Safety
+#[no_mangle]
+pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
+    _dump_vk(fork_name, file);
+}
+
+fn _dump_vk(fork_name: *const c_char, file: *const c_char) {
+    let fork_name_str = c_char_to_str(fork_name);
+    let verifier = verifier::get_verifier(fork_name_str);
+
+    if let Ok(verifier) = verifier {
+        verifier.as_ref().dump_vk(Path::new(c_char_to_str(file)));
+    }
+}
@@ -1,13 +1,10 @@
-mod darwin;
-mod darwin_v2;
+#![allow(static_mut_refs)]

+mod euclidv2;
 use anyhow::{bail, Result};
-use darwin::DarwinVerifier;
-use darwin_v2::DarwinV2Verifier;
-use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};
-use prover_v4::utils::load_params;
+use euclidv2::EuclidV2Verifier;
 use serde::{Deserialize, Serialize};
-use std::{cell::OnceCell, collections::BTreeMap, rc::Rc};
+use std::{cell::OnceCell, path::Path, rc::Rc};

 #[derive(Debug, Clone, Copy, PartialEq)]
 pub enum TaskType {
@@ -16,76 +13,40 @@ pub enum TaskType {
     Bundle,
 }

+#[derive(Debug, Serialize, Deserialize)]
+pub struct VKDump {
+    pub chunk_vk: String,
+    pub batch_vk: String,
+    pub bundle_vk: String,
+}
+
 pub trait ProofVerifier {
     fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
+    fn dump_vk(&self, file: &Path);
 }

 #[derive(Debug, Serialize, Deserialize)]
 pub struct CircuitConfig {
     pub fork_name: String,
     pub params_path: String,
     pub assets_path: String,
 }

 #[derive(Debug, Serialize, Deserialize)]
 pub struct VerifierConfig {
     pub low_version_circuit: CircuitConfig,
     pub high_version_circuit: CircuitConfig,
 }

 type HardForkName = String;

 struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);

 static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
-static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
-static mut PARAMS_MAP: OnceCell<BTreeMap<u32, ParamsKZG<Bn256>>> = OnceCell::new();

 pub fn init(config: VerifierConfig) {
-    let low_conf = config.low_version_circuit;
-
-    std::env::set_var("SCROLL_PROVER_ASSETS_DIR", &low_conf.assets_path);
-    let params_degrees = [
-        *prover_v4::config::LAYER2_DEGREE,
-        *prover_v4::config::LAYER4_DEGREE,
-    ];
-
-    // params should be shared between low and high
-    let mut params_map = BTreeMap::new();
-    for degree in params_degrees {
-        if let std::collections::btree_map::Entry::Vacant(e) = params_map.entry(degree) {
-            match load_params(&low_conf.params_path, degree, None) {
-                Ok(params) => {
-                    e.insert(params);
-                }
-                Err(e) => panic!(
-                    "failed to load params, degree {}, dir {}, err {}",
-                    degree, low_conf.params_path, e
-                ),
-            }
-        }
-    }
-    unsafe {
-        PARAMS_MAP.set(params_map).unwrap_unchecked();
-    }
-
-    let verifier = DarwinVerifier::new(unsafe { PARAMS_MAP.get().unwrap() }, &low_conf.assets_path);
-
-    unsafe {
-        VERIFIER_LOW
-            .set(VerifierPair(
-                low_conf.fork_name,
-                Rc::new(Box::new(verifier)),
-            ))
-            .unwrap_unchecked();
-    }
-    let high_conf = config.high_version_circuit;
-    let verifier =
-        DarwinV2Verifier::new(unsafe { PARAMS_MAP.get().unwrap() }, &high_conf.assets_path);
+    let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
     unsafe {
         VERIFIER_HIGH
             .set(VerifierPair(
-                high_conf.fork_name,
+                config.high_version_circuit.fork_name,
                 Rc::new(Box::new(verifier)),
             ))
             .unwrap_unchecked();
@@ -94,12 +55,6 @@ pub fn init(config: VerifierConfig) {

 pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
     unsafe {
-        if let Some(verifier) = VERIFIER_LOW.get() {
-            if verifier.0 == fork_name {
-                return Ok(verifier.1.clone());
-            }
-        }
-
         if let Some(verifier) = VERIFIER_HIGH.get() {
             if verifier.0 == fork_name {
                 return Ok(verifier.1.clone());
@@ -1,48 +0,0 @@
use super::{ProofVerifier, TaskType};

use anyhow::Result;
use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};

use crate::utils::panic_catch;
use prover_v4::{
    aggregator::Verifier as AggVerifier, zkevm::Verifier, BatchProof, BundleProof, ChunkProof,
};
use std::{collections::BTreeMap, env};

pub struct DarwinVerifier<'params> {
    verifier: Verifier<'params>,
    agg_verifier: AggVerifier<'params>,
}

impl<'params> DarwinVerifier<'params> {
    pub fn new(params_map: &'params BTreeMap<u32, ParamsKZG<Bn256>>, assets_dir: &str) -> Self {
        env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
        let verifier = Verifier::from_params_and_assets(params_map, assets_dir);
        let agg_verifier = AggVerifier::from_params_and_assets(params_map, assets_dir);

        Self {
            verifier,
            agg_verifier,
        }
    }
}

impl<'params> ProofVerifier for DarwinVerifier<'params> {
    fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
        let result = panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
                self.verifier.verify_chunk_proof(proof)
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
                self.agg_verifier.verify_batch_proof(&proof)
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
                self.agg_verifier.verify_bundle_proof(proof)
            }
        });
        result.map_err(|e| anyhow::anyhow!(e))
    }
}
@@ -1,48 +0,0 @@
use super::{ProofVerifier, TaskType};

use anyhow::Result;
use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};

use crate::utils::panic_catch;
use prover_v5::{
    aggregator::Verifier as AggVerifier, zkevm::Verifier, BatchProof, BundleProof, ChunkProof,
};
use std::{collections::BTreeMap, env};

pub struct DarwinV2Verifier<'params> {
    verifier: Verifier<'params>,
    agg_verifier: AggVerifier<'params>,
}

impl<'params> DarwinV2Verifier<'params> {
    pub fn new(params_map: &'params BTreeMap<u32, ParamsKZG<Bn256>>, assets_dir: &str) -> Self {
        env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
        let verifier = Verifier::from_params_and_assets(params_map, assets_dir);
        let agg_verifier = AggVerifier::from_params_and_assets(params_map, assets_dir);

        Self {
            verifier,
            agg_verifier,
        }
    }
}

impl<'params> ProofVerifier for DarwinV2Verifier<'params> {
    fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
        let result = panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
                self.verifier.verify_chunk_proof(proof)
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
                self.agg_verifier.verify_batch_proof(&proof)
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
                self.agg_verifier.verify_bundle_proof(proof)
            }
        });
        result.map_err(|e| anyhow::anyhow!(e))
    }
}
65  common/libzkp/impl/src/verifier/euclidv2.rs  Normal file
@@ -0,0 +1,65 @@
use super::{ProofVerifier, TaskType, VKDump};

use anyhow::Result;

use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV2, ChunkVerifier};
use std::{fs::File, path::Path};

pub struct EuclidV2Verifier {
    chunk_verifier: ChunkVerifier,
    batch_verifier: BatchVerifier,
    bundle_verifier: BundleVerifierEuclidV2,
}

impl EuclidV2Verifier {
    pub fn new(assets_dir: &str) -> Self {
        let verifier_bin = Path::new(assets_dir).join("verifier.bin");
        let config = Path::new(assets_dir).join("root-verifier-vm-config");
        let exe = Path::new(assets_dir).join("root-verifier-committed-exe");

        Self {
            chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up chunk verifier"),
            batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up batch verifier"),
            bundle_verifier: BundleVerifierEuclidV2::setup(&config, &exe, &verifier_bin)
                .expect("Setting up bundle verifier"),
        }
    }
}

impl ProofVerifier for EuclidV2Verifier {
    fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
        panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
                self.chunk_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
                self.batch_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
                self.bundle_verifier
                    .verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
            }
        })
        .map_err(|err_str: String| anyhow::anyhow!(err_str))
    }

    fn dump_vk(&self, file: &Path) {
        let f = File::create(file).expect("Failed to open file to dump VK");

        let dump = VKDump {
            chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
            batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
            bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
        };
        serde_json::to_writer(f, &dump).expect("Failed to dump VK");
    }
}
@@ -8,3 +8,5 @@ char verify_batch_proof(char* proof, char* fork_name);
 char verify_bundle_proof(char* proof, char* fork_name);

 char verify_chunk_proof(char* proof, char* fork_name);
+
+void dump_vk(char* fork_name, char* file);
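A minimal cgo sketch of how a Go caller might drive the new dump_vk entry point. The link flags, the include path, and the fork name "euclidV2" are assumptions; only the dump_vk signature comes from the header above.

package main

/*
#cgo LDFLAGS: -lzkp -ldl
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"
import "unsafe"

// dumpVK asks libzkp to write the verifying keys for the given fork to a file.
func dumpVK(forkName, path string) {
	cFork := C.CString(forkName)
	cPath := C.CString(path)
	defer C.free(unsafe.Pointer(cFork))
	defer C.free(unsafe.Pointer(cPath))
	C.dump_vk(cFork, cPath)
}

func main() {
	dumpVK("euclidV2", "/tmp/vk_dump.json")
}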
4  common/testdata/blobdata.json  vendored  Normal file
File diff suppressed because one or more lines are too long
@@ -276,8 +276,8 @@ const (
 	SenderTypeFinalizeBatch
 	// SenderTypeL1GasOracle indicates a sender from L2 responsible for updating L1 gas prices.
 	SenderTypeL1GasOracle
-	// SenderTypeL2GasOracle indicates a sender from L1 responsible for updating L2 gas prices.
-	SenderTypeL2GasOracle
+	// SenderTypeL2GasOracleDeprecated indicates a sender from L1 responsible for updating L2 gas prices, which is deprecated.
+	SenderTypeL2GasOracleDeprecated
 )

 // String returns a string representation of the SenderType.
@@ -289,8 +289,8 @@ func (t SenderType) String() string {
 		return "SenderTypeFinalizeBatch"
 	case SenderTypeL1GasOracle:
 		return "SenderTypeL1GasOracle"
-	case SenderTypeL2GasOracle:
-		return "SenderTypeL2GasOracle"
+	case SenderTypeL2GasOracleDeprecated:
+		return "SenderTypeL2GasOracleDeprecated"
 	default:
 		return fmt.Sprintf("Unknown SenderType (%d)", int32(t))
 	}
@@ -326,3 +326,53 @@ func (s TxStatus) String() string {
 		return fmt.Sprintf("Unknown TxStatus (%d)", int32(s))
 	}
 }
+
+// BlobUploadStatus represents the status of a blob upload
+type BlobUploadStatus int
+
+const (
+	// BlobUploadStatusUndefined indicates an undefined status
+	BlobUploadStatusUndefined BlobUploadStatus = iota
+	// BlobUploadStatusPending indicates a pending upload status
+	BlobUploadStatusPending
+	// BlobUploadStatusUploaded indicates a successful upload status
+	BlobUploadStatusUploaded
+	// BlobUploadStatusFailed indicates a failed upload status
+	BlobUploadStatusFailed
+)
+
+func (s BlobUploadStatus) String() string {
+	switch s {
+	case BlobUploadStatusPending:
+		return "BlobUploadStatusPending"
+	case BlobUploadStatusUploaded:
+		return "BlobUploadStatusUploaded"
+	case BlobUploadStatusFailed:
+		return "BlobUploadStatusFailed"
+	default:
+		return fmt.Sprintf("Unknown BlobUploadStatus (%d)", int32(s))
+	}
+}
+
+// BlobStoragePlatform represents the platform a blob is uploaded to
+type BlobStoragePlatform int
+
+const (
+	// BlobStoragePlatformUndefined indicates an undefined platform
+	BlobStoragePlatformUndefined BlobStoragePlatform = iota
+	// BlobStoragePlatformS3 represents AWS S3
+	BlobStoragePlatformS3
+	// BlobStoragePlatformArweave represents storage blockchain Arweave
+	BlobStoragePlatformArweave
+)
+
+func (s BlobStoragePlatform) String() string {
+	switch s {
+	case BlobStoragePlatformS3:
+		return "BlobStoragePlatformS3"
+	case BlobStoragePlatformArweave:
+		return "BlobStoragePlatformArweave"
+	default:
+		return fmt.Sprintf("Unknown BlobStoragePlatform (%d)", int32(s))
+	}
+}
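As a quick illustration of the intended state machine, a hedged sketch of one upload attempt; the nextStatus helper is hypothetical and only mirrors the constants added above:

package main

import "fmt"

type BlobUploadStatus int

const (
	BlobUploadStatusUndefined BlobUploadStatus = iota
	BlobUploadStatusPending
	BlobUploadStatusUploaded
	BlobUploadStatusFailed
)

// nextStatus sketches the transition a blob-uploader loop might apply
// after one upload attempt: a pending upload either succeeds or fails.
func nextStatus(cur BlobUploadStatus, uploadErr error) BlobUploadStatus {
	if cur != BlobUploadStatusPending {
		return cur // only pending uploads change state here
	}
	if uploadErr != nil {
		return BlobUploadStatusFailed
	}
	return BlobUploadStatusUploaded
}

func main() {
	fmt.Println(nextStatus(BlobUploadStatusPending, nil)) // 2 (Uploaded)
}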
@@ -173,9 +173,9 @@ func TestSenderType(t *testing.T) {
 			"SenderTypeL1GasOracle",
 		},
 		{
-			"SenderTypeL2GasOracle",
-			SenderTypeL2GasOracle,
-			"SenderTypeL2GasOracle",
+			"SenderTypeL2GasOracleDeprecated",
+			SenderTypeL2GasOracleDeprecated,
+			"SenderTypeL2GasOracleDeprecated",
 		},
 		{
 			"Invalid Value",
@@ -1,20 +1,19 @@
 package message

 import (
 	"encoding/json"
 	"errors"
 	"fmt"
 	"math/big"

 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/common/hexutil"
 )

 // RespStatus represents status code from prover to scroll
 type RespStatus uint32

 const (
 	// StatusOk means generate proof success
 	StatusOk RespStatus = iota
 	// StatusProofError means generate proof failed
 	StatusProofError
+	EuclidV2Fork = "euclidV2"
+
+	EuclidV2ForkNameForProver = "euclidv2"
 )

 // ProofType represents the type of task.
@@ -44,119 +43,263 @@ const (
 	ProofTypeBundle
 )

-// ChunkTaskDetail is a type containing ChunkTask detail.
+// ChunkTaskDetail is a type containing ChunkTask detail for chunk task.
 type ChunkTaskDetail struct {
-	BlockHashes []common.Hash `json:"block_hashes"`
+	// use one of the string of "euclidv1" / "euclidv2"
+	ForkName         string        `json:"fork_name"`
+	BlockHashes      []common.Hash `json:"block_hashes"`
+	PrevMsgQueueHash common.Hash   `json:"prev_msg_queue_hash"`
 }

+// Byte48 is a hex-encoded big integer with a fixed length of 48 bytes.
+type Byte48 struct {
+	hexutil.Big
+}
+
+func (e Byte48) MarshalText() ([]byte, error) {
+	i := e.ToInt()
+	// override the default big-int encoding
+	if sign := i.Sign(); sign < 0 {
+		// sanity check
+		return nil, errors.New("Byte48 must be positive integer")
+	} else {
+		s := i.Text(16)
+		if len(s) > 96 {
+			return nil, errors.New("integer exceeds 384 bits")
+		}
+		return []byte(fmt.Sprintf("0x%0*s", 96, s)), nil
+	}
+}
+
+func isString(input []byte) bool {
+	return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"'
+}
+
+// hexutil.Big has a limitation of 256 bits, so we have to override UnmarshalJSON here.
+func (e *Byte48) UnmarshalJSON(input []byte) error {
+	if !isString(input) {
+		return errors.New("not hex string")
+	}
+
+	b, err := hexutil.Decode(string(input[1 : len(input)-1]))
+	if err != nil {
+		return err
+	}
+	if len(b) != 48 {
+		return fmt.Errorf("not a 48 bytes hex string: %d", len(b))
+	}
+	var dec big.Int
+	dec.SetBytes(b)
+	*e = Byte48{(hexutil.Big)(dec)}
+	return nil
+}
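To make the fixed-width encoding concrete: a 48-byte value always marshals to exactly 96 hex characters, left-padded with zeros. A small self-contained sketch, assuming Byte48 carries 48-byte KZG commitments/proofs as the kzg_proof and kzg_commitment fields below suggest (the mirror here reproduces only the happy MarshalText path from above):

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common/hexutil"
)

// Byte48 mirrors the type above: a big.Int constrained to 48 bytes.
type Byte48 struct {
	hexutil.Big
}

func (e Byte48) MarshalText() ([]byte, error) {
	s := e.ToInt().Text(16)
	return []byte(fmt.Sprintf("0x%0*s", 96, s)), nil // left-pad to 96 hex chars
}

func main() {
	v := Byte48{(hexutil.Big)(*big.NewInt(0xabcd))}
	out, _ := json.Marshal(v)
	fmt.Println(len(out), string(out)) // 100 = "0x" + 96 hex chars + two quotes
}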
 // BatchTaskDetail is a type containing BatchTask detail.
 type BatchTaskDetail struct {
-	ChunkInfos  []*ChunkInfo  `json:"chunk_infos"`
-	ChunkProofs []*ChunkProof `json:"chunk_proofs"`
-	BatchHeader interface{}   `json:"batch_header"`
-	BlobBytes   []byte        `json:"blob_bytes"`
+	// use one of the string of "euclidv1" / "euclidv2"
+	ForkName        string              `json:"fork_name"`
+	ChunkInfos      []*ChunkInfo        `json:"chunk_infos"`
+	ChunkProofs     []*OpenVMChunkProof `json:"chunk_proofs"`
+	BatchHeader     interface{}         `json:"batch_header"`
+	BlobBytes       []byte              `json:"blob_bytes"`
+	KzgProof        Byte48              `json:"kzg_proof,omitempty"`
+	KzgCommitment   Byte48              `json:"kzg_commitment,omitempty"`
+	ChallengeDigest common.Hash         `json:"challenge_digest,omitempty"`
 }

 // BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
 type BundleTaskDetail struct {
-	BatchProofs []*BatchProof `json:"batch_proofs"`
+	// use one of the string of "euclidv1" / "euclidv2"
+	ForkName    string              `json:"fork_name"`
+	BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
+	BundleInfo  *OpenVMBundleInfo   `json:"bundle_info,omitempty"`
 }

 // ChunkInfo is for calculating pi_hash for chunk
 type ChunkInfo struct {
 	ChainID       uint64      `json:"chain_id"`
 	PrevStateRoot common.Hash `json:"prev_state_root"`
 	PostStateRoot common.Hash `json:"post_state_root"`
 	WithdrawRoot  common.Hash `json:"withdraw_root"`
 	DataHash      common.Hash `json:"data_hash"`
 	IsPadding     bool        `json:"is_padding"`
 	TxBytes       []byte      `json:"tx_bytes"`
 	TxBytesHash   common.Hash `json:"tx_data_digest"`
+	PrevMsgQueueHash   common.Hash      `json:"prev_msg_queue_hash"`
+	PostMsgQueueHash   common.Hash      `json:"post_msg_queue_hash"`
+	TxDataLength       uint64           `json:"tx_data_length"`
+	InitialBlockNumber uint64           `json:"initial_block_number"`
+	BlockCtxs          []BlockContextV2 `json:"block_ctxs"`
 }
|
||||
// BlockContextV2 is the block context for euclid v2
|
||||
type BlockContextV2 struct {
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
BaseFee hexutil.Big `json:"base_fee"`
|
||||
GasLimit uint64 `json:"gas_limit"`
|
||||
NumTxs uint16 `json:"num_txs"`
|
||||
NumL1Msgs uint16 `json:"num_l1_msgs"`
|
||||
}
|
||||
|
||||
// Proof for flatten VM proof
|
||||
type OpenVMProof struct {
|
||||
Proof []byte `json:"proofs"`
|
||||
PublicValues []byte `json:"public_values"`
|
||||
}
|
||||
|
||||
// Proof for flatten EVM proof
|
||||
type OpenVMEvmProof struct {
|
||||
Proof []byte `json:"proof"`
|
||||
Instances []byte `json:"instances"`
|
||||
}
|
||||
|
||||
// OpenVMChunkProof includes the proof info that are required for chunk verification and rollup.
|
||||
type OpenVMChunkProof struct {
|
||||
MetaData struct {
|
||||
ChunkInfo *ChunkInfo `json:"chunk_info"`
|
||||
} `json:"metadata"`
|
||||
|
||||
VmProof *OpenVMProof `json:"proof"`
|
||||
Vk []byte `json:"vk,omitempty"`
|
||||
GitVersion string `json:"git_version,omitempty"`
|
||||
}
|
||||
|
||||
func (p *OpenVMChunkProof) Proof() []byte {
|
||||
proofJson, err := json.Marshal(p.VmProof)
|
||||
if err != nil {
|
||||
panic(fmt.Sprint("marshaling error", err))
|
||||
}
|
||||
|
||||
return proofJson
|
||||
}
|
||||
|
||||
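A quick sketch of the wire shape implied by OpenVMProof's tags: Proof() emits a JSON object keyed "proofs" and "public_values", with byte slices base64-encoded by encoding/json. The mirror and values below are illustrative only:

package main

import (
	"encoding/json"
	"fmt"
)

// OpenVMProof mirrors the struct above; note the asymmetric "proofs" key.
type OpenVMProof struct {
	Proof        []byte `json:"proofs"`
	PublicValues []byte `json:"public_values"`
}

func main() {
	p := OpenVMProof{Proof: []byte{0x01}, PublicValues: []byte{0x02}}
	out, _ := json.Marshal(p)
	// []byte fields marshal as base64 in encoding/json:
	// {"proofs":"AQ==","public_values":"Ag=="}
	fmt.Println(string(out))
}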
+// OpenVMBatchInfo is for calculating pi_hash for batch header
+type OpenVMBatchInfo struct {
+	ParentBatchHash  common.Hash `json:"parent_batch_hash"`
+	ParentStateRoot  common.Hash `json:"parent_state_root"`
+	StateRoot        common.Hash `json:"state_root"`
+	WithdrawRoot     common.Hash `json:"withdraw_root"`
+	BatchHash        common.Hash `json:"batch_hash"`
+	ChainID          uint64      `json:"chain_id"`
+	PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
+	PostMsgQueueHash common.Hash `json:"post_msg_queue_hash"`
+}
+
+// BatchProof includes the proof info that are required for batch verification and rollup.
+type OpenVMBatchProof struct {
+	MetaData struct {
+		BatchInfo *OpenVMBatchInfo `json:"batch_info"`
+		BatchHash common.Hash      `json:"batch_hash"`
+	} `json:"metadata"`
+
+	VmProof    *OpenVMProof `json:"proof"`
+	Vk         []byte       `json:"vk,omitempty"`
+	GitVersion string       `json:"git_version,omitempty"`
+}
+
+func (p *OpenVMBatchProof) Proof() []byte {
+	proofJson, err := json.Marshal(p.VmProof)
+	if err != nil {
+		panic(fmt.Sprint("marshaling error", err))
+	}
+
+	return proofJson
+}
+
+// SanityCheck checks whether a BatchProof is in a legal format
+func (ap *OpenVMBatchProof) SanityCheck() error {
+	if ap == nil {
+		return errors.New("agg_proof is nil")
+	}
+	if ap.MetaData.BatchInfo == nil {
+		return errors.New("batch info not ready")
+	}
+
+	if ap.VmProof == nil {
+		return errors.New("proof not ready")
+	} else {
+		if len(ap.Vk) == 0 {
+			return errors.New("vk not ready")
+		}
+		pf := ap.VmProof
+		if pf.Proof == nil {
+			return errors.New("proof data not ready")
+		}
+		if len(pf.PublicValues) == 0 {
+			return errors.New("proof public value not ready")
+		}
+	}
+
+	return nil
+}
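For completeness, a hedged sketch of how a caller might gate on such a sanity check before doing any expensive verification; the batchProof mirror and the handleBatchProof handler are hypothetical stand-ins for the types above:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// Minimal mirror of the OpenVMBatchProof shape above, enough to exercise
// the same nil/emptiness checks; the real type lives in common/types/message.
type batchProof struct {
	VmProof *struct {
		Proof        []byte `json:"proofs"`
		PublicValues []byte `json:"public_values"`
	} `json:"proof"`
	Vk []byte `json:"vk,omitempty"`
}

func (p *batchProof) sanityCheck() error {
	if p == nil {
		return errors.New("proof is nil")
	}
	if p.VmProof == nil {
		return errors.New("proof not ready")
	}
	if len(p.Vk) == 0 {
		return errors.New("vk not ready")
	}
	return nil
}

// handleBatchProof sketches rejecting a malformed payload early.
func handleBatchProof(payload []byte) error {
	var proof batchProof
	if err := json.Unmarshal(payload, &proof); err != nil {
		return fmt.Errorf("malformed proof payload: %w", err)
	}
	return proof.sanityCheck()
}

func main() {
	fmt.Println(handleBatchProof([]byte(`{}`))) // proof not ready
}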
+// OpenVMBundleInfo is for calculating pi_hash for bundle header
+type OpenVMBundleInfo struct {
+	ChainID       uint64      `json:"chain_id"`
+	PrevStateRoot common.Hash `json:"prev_state_root"`
+	PostStateRoot common.Hash `json:"post_state_root"`
+	WithdrawRoot  common.Hash `json:"withdraw_root"`
+	DataHash      common.Hash `json:"data_hash"`
+	IsPadding     bool        `json:"is_padding"`
+	TxBytes       []byte      `json:"tx_bytes"`
+	NumBatches    uint32      `json:"num_batches"`
+	PrevBatchHash common.Hash `json:"prev_batch_hash"`
+	BatchHash     common.Hash `json:"batch_hash"`
+	MsgQueueHash  common.Hash `json:"msg_queue_hash"`
+}
+
-// SubCircuitRowUsage tracing info added in v0.11.0rc8
-type SubCircuitRowUsage struct {
-	Name      string `json:"name"`
-	RowNumber uint64 `json:"row_number"`
+// OpenVMBundleProof includes the proof info that are required for verification of a bundle of batch proofs.
+type OpenVMBundleProof struct {
+	MetaData struct {
+		BundleInfo    *OpenVMBundleInfo `json:"bundle_info"`
+		BunndlePIHash common.Hash       `json:"bundle_pi_hash"`
+	} `json:"metadata"`
+
+	EvmProof   *OpenVMEvmProof `json:"proof"`
+	Vk         []byte          `json:"vk,omitempty"`
+	GitVersion string          `json:"git_version,omitempty"`
 }
// ChunkProof includes the proof info that are required for chunk verification and rollup.
type ChunkProof struct {
	StorageTrace []byte `json:"storage_trace,omitempty"`
	Protocol     []byte `json:"protocol"`
	Proof        []byte `json:"proof"`
	Instances    []byte `json:"instances"`
	Vk           []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
	ChunkInfo  *ChunkInfo           `json:"chunk_info,omitempty"`
	GitVersion string               `json:"git_version,omitempty"`
	RowUsages  []SubCircuitRowUsage `json:"row_usages,omitempty"`
}

// BatchProof includes the proof info that are required for batch verification and rollup.
type BatchProof struct {
	Protocol  []byte `json:"protocol"`
	Proof     []byte `json:"proof"`
	Instances []byte `json:"instances"`
	Vk        []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
	BatchHash  common.Hash `json:"batch_hash"`
	GitVersion string      `json:"git_version,omitempty"`
}

// SanityCheck checks whether a BatchProof is in a legal format
func (ap *BatchProof) SanityCheck() error {
	if ap == nil {
		return errors.New("agg_proof is nil")
	}

	if len(ap.Proof) == 0 {
		return errors.New("proof not ready")
	}

	if len(ap.Proof)%32 != 0 {
		return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof))
	}

	if len(ap.Instances) == 0 {
		return errors.New("instance not ready")
	}

	if len(ap.Vk) == 0 {
		return errors.New("vk not ready")
	}

	return nil
}

// BundleProof includes the proof info that are required for verification of a bundle of batch proofs.
type BundleProof struct {
	Proof     []byte `json:"proof"`
	Instances []byte `json:"instances"`
	Vk        []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
	GitVersion string `json:"git_version,omitempty"`
}

// Proof returns the proof bytes that are eventually passed as calldata for on-chain bundle proof verification.
//
// There are 12 accumulators for a SNARK proof. The accumulators are the first 12 elements of the EvmProof's
// Instances field. The remaining items in Instances are supplied on-chain by the ScrollChain contract.
//
// The structure of these bytes is:
// | byte index start | byte length | value    | description       |
// |------------------|-------------|----------|-------------------|
// | 0                | 32          | accs[0]  | accumulator 1     |
// | 32               | 32          | accs[1]  | accumulator 2     |
// | 32*i ...         | 32          | accs[i]  | accumulator i ... |
// | 352              | 32          | accs[11] | accumulator 12    |
// | 384              | dynamic     | proof    | proof bytes       |
func (p *OpenVMBundleProof) Proof() []byte {
	proofBytes := make([]byte, 0, 384+len(p.EvmProof.Proof))
	proofBytes = append(proofBytes, p.EvmProof.Instances[:384]...)
	return append(proofBytes, p.EvmProof.Proof...)
}
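
To make the calldata layout above concrete, here is a minimal sketch of the inverse operation — splitting the returned bytes back into the 12 accumulators and the trailing proof. The helper is hypothetical and not part of this diff:

// splitBundleCalldata reverses the layout documented above: twelve
// 32-byte accumulators (384 bytes total) followed by the dynamic proof tail.
func splitBundleCalldata(calldata []byte) (accs [12][32]byte, proof []byte, err error) {
	if len(calldata) < 384 {
		return accs, nil, fmt.Errorf("calldata too short: %d bytes", len(calldata))
	}
	for i := 0; i < 12; i++ {
		copy(accs[i][:], calldata[32*i:32*(i+1)])
	}
	return accs, calldata[384:], nil
}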

 // SanityCheck checks whether a BundleProof is in a legal format
-func (ap *BundleProof) SanityCheck() error {
+func (ap *OpenVMBundleProof) SanityCheck() error {
 	if ap == nil {
 		return errors.New("agg_proof is nil")
 	}
 
-	if len(ap.Proof) == 0 {
+	if ap.MetaData.BundleInfo == nil {
+		return errors.New("bundle info not ready")
+	}
+
+	if ap.EvmProof == nil {
 		return errors.New("proof not ready")
-	}
-
-	if len(ap.Proof)%32 != 0 {
-		return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof))
-	}
-
-	if len(ap.Instances) == 0 {
-		return errors.New("instance not ready")
-	}
-
-	if len(ap.Vk) == 0 {
-		return errors.New("vk not ready")
+	} else {
+		if len(ap.Vk) == 0 {
+			return errors.New("vk not ready")
+		}
+		pf := ap.EvmProof
+		if len(pf.Proof)%32 != 0 {
+			return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(pf.Proof))
+		}
+		if len(pf.Instances) == 0 {
+			return errors.New("instance not ready")
+		}
 	}
 
 	return nil
 }
22	common/types/message/message_test.go	Normal file
@@ -0,0 +1,22 @@
package message

import (
	"fmt"
	"testing"
)

func TestBytes48(t *testing.T) {
	ti := &Byte48{}
	ti.UnmarshalText([]byte("0x1"))
	if s, err := ti.MarshalText(); err == nil {
		if len(s) != 98 {
			panic(fmt.Sprintf("wrong str: %s", s))
		}
	}
	ti.UnmarshalText([]byte("0x0"))
	if s, err := ti.MarshalText(); err == nil {
		if len(s) != 98 {
			panic(fmt.Sprintf("wrong str: %s", s))
		}
	}
}
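
The expected length of 98 follows directly from the encoding: 48 bytes hex-encode to 96 characters, plus the 2-character "0x" prefix. A standalone sketch of that arithmetic using only the standard library (independent of the Byte48 implementation):

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// 48 bytes -> 96 hex characters, plus the "0x" prefix = 98.
	padded := make([]byte, 48)
	text := "0x" + hex.EncodeToString(padded)
	fmt.Println(len(text)) // prints 98
}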
23	common/utils/blob.go	Normal file
@@ -0,0 +1,23 @@
package utils

import (
	"crypto/sha256"
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// CalculateVersionedBlobHash calculate the kzg4844 versioned blob hash from a blob
func CalculateVersionedBlobHash(blob kzg4844.Blob) ([32]byte, error) {
	// calculate kzg4844 commitment from blob
	commit, err := kzg4844.BlobToCommitment(&blob)
	if err != nil {
		return [32]byte{}, fmt.Errorf("failed to get blob commitment, err: %w", err)
	}

	// calculate kzg4844 versioned blob hash from blob commitment
	hasher := sha256.New()
	vh := kzg4844.CalcBlobHashV1(hasher, &commit)

	return vh, nil
}
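
As a cross-check on the helper above: per EIP-4844, the versioned hash is the SHA-256 of the commitment with the first byte replaced by the version tag 0x01, which is what kzg4844.CalcBlobHashV1 computes. A hand-rolled sketch, assuming the same imports as this file:

// manualVersionedHash mirrors what CalcBlobHashV1 produces: sha256(commitment)
// with byte 0 overwritten by the blob-commitment version tag (0x01).
func manualVersionedHash(commit kzg4844.Commitment) [32]byte {
	vh := sha256.Sum256(commit[:])
	vh[0] = 0x01
	return vh
}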
51	common/utils/blob_test.go	Normal file
@@ -0,0 +1,51 @@
package utils

import (
	"encoding/hex"
	"encoding/json"
	"os"
	"testing"

	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

type BlobData struct {
	VersionedBlobHash string `json:"versionedBlobHash"`
	BlobData          string `json:"blobData"`
}

// TestCalculateVersionedBlobHash tests the CalculateVersionedBlobHash function
func TestCalculateVersionedBlobHash(t *testing.T) {
	// Read the test data
	data, err := os.ReadFile("../testdata/blobdata.json")
	if err != nil {
		t.Fatalf("Failed to read blobdata.json: %v", err)
	}

	var blobData BlobData
	if err := json.Unmarshal(data, &blobData); err != nil {
		t.Fatalf("Failed to parse blobdata.json: %v", err)
	}

	blobBytes, err := hex.DecodeString(blobData.BlobData)
	if err != nil {
		t.Fatalf("Failed to decode blob data: %v", err)
	}

	// Convert []byte to kzg4844.Blob
	var blob kzg4844.Blob
	copy(blob[:], blobBytes)

	// Calculate the hash
	calculatedHashBytes, err := CalculateVersionedBlobHash(blob)
	if err != nil {
		t.Fatalf("Failed to calculate versioned blob hash: %v", err)
	}

	calculatedHash := hex.EncodeToString(calculatedHashBytes[:])

	if calculatedHash != blobData.VersionedBlobHash {
		t.Fatalf("Hash mismatch: got %s, want %s", calculatedHash, blobData.VersionedBlobHash)
	}
}
@@ -20,9 +20,12 @@ var (
 	}
 	// RollupRelayerFlags contains flags only used in rollup-relayer
 	RollupRelayerFlags = []cli.Flag{
-		&ImportGenesisFlag,
 		&MinCodecVersionFlag,
 	}
+	// ProposerToolFlags contains flags only used in proposer tool
+	ProposerToolFlags = []cli.Flag{
+		&StartL2BlockFlag,
+	}
 	// ConfigFileFlag load json type config file.
 	ConfigFileFlag = cli.StringFlag{
 		Name: "config",
@@ -73,12 +76,6 @@ var (
 		Category: "METRICS",
 		Value:    6060,
 	}
-	// ImportGenesisFlag import genesis batch during startup
-	ImportGenesisFlag = cli.BoolFlag{
-		Name:  "import-genesis",
-		Usage: "Import genesis batch into L1 contract during startup",
-		Value: false,
-	}
 	// ServicePortFlag is the port the service will listen on
 	ServicePortFlag = cli.IntFlag{
 		Name: "service.port",
@@ -97,4 +94,10 @@ var (
 		Usage:    "Minimum required codec version for the chunk/batch/bundle proposers",
 		Required: true,
 	}
+	// StartL2BlockFlag indicates the start L2 block number for proposer tool
+	StartL2BlockFlag = cli.Uint64Flag{
+		Name:  "start-l2-block",
+		Usage: "Start L2 block number for proposer tool",
+		Value: 0,
+	}
 )
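
For illustration, a sketch of how the new flag could be read inside a urfave/cli/v2 action, assuming the flag set above is wired into the app; the action function itself is hypothetical:

// run reads StartL2BlockFlag from the CLI context; per the definition above,
// the value defaults to 0 when the flag is not supplied.
func run(ctx *cli.Context) error {
	startL2Block := ctx.Uint64(StartL2BlockFlag.Name)
	fmt.Println("proposer tool starting from L2 block", startL2Block)
	return nil
}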

@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.4.85"
+var tag = "v4.5.24"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -90,18 +90,10 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
 	cfg.ProverManager = &coordinatorConfig.ProverManager{
 		ProversPerSession: 1,
 		Verifier: &coordinatorConfig.VerifierConfig{
 			MockMode: true,
-			LowVersionCircuit: &coordinatorConfig.CircuitConfig{
-				ParamsPath:       "",
-				AssetsPath:       "",
-				ForkName:         "darwin",
-				MinProverVersion: "v4.2.0",
-			},
 			HighVersionCircuit: &coordinatorConfig.CircuitConfig{
-				ParamsPath:       "",
 				AssetsPath:       "",
-				ForkName:         "darwinV2",
-				MinProverVersion: "v4.3.0",
+				ForkName:         "euclidV2",
+				MinProverVersion: "v4.4.89",
 			},
 		},
 		BatchCollectionTimeSec: 60,
@@ -62,9 +62,9 @@ func action(ctx *cli.Context) error {
 		return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID)
 	}
 
-	var batchProofs []*message.BatchProof
+	var batchProofs []*message.OpenVMBatchProof
 	for _, batch := range batches {
-		var proof message.BatchProof
+		var proof message.OpenVMBatchProof
 		if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
 			log.Error("failed to unmarshal batch proof")
 			return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash)
@@ -2,21 +2,14 @@
 	"prover_manager": {
 		"provers_per_session": 1,
 		"session_attempts": 5,
+		"external_prover_threshold": 32,
 		"bundle_collection_time_sec": 180,
 		"batch_collection_time_sec": 180,
 		"chunk_collection_time_sec": 180,
 		"verifier": {
 			"mock_mode": true,
-			"low_version_circuit": {
-				"params_path": "params",
-				"assets_path": "assets",
-				"fork_name": "darwin",
-				"min_prover_version": "v4.4.43"
-			},
 			"high_version_circuit": {
-				"params_path": "params",
 				"assets_path": "assets",
-				"fork_name": "darwinV2",
+				"fork_name": "euclidV2",
 				"min_prover_version": "v4.4.45"
 			}
 		}

@@ -2,8 +2,6 @@ module scroll-tech/coordinator
 
 go 1.22
 
-toolchain go1.22.2
-
 require (
 	github.com/appleboy/gin-jwt/v2 v2.9.1
 	github.com/gin-gonic/gin v1.9.1
@@ -11,10 +9,10 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/prometheus/client_golang v1.19.0
-	github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e
+	github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
 	github.com/shopspring/decimal v1.3.1
-	github.com/stretchr/testify v1.9.0
+	github.com/stretchr/testify v1.10.0
 	github.com/urfave/cli/v2 v2.25.7
 	golang.org/x/arch v0.5.0 // indirect
 	gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
@@ -52,8 +50,8 @@ require (
 	github.com/bits-and-blooms/bitset v1.20.0 // indirect
 	github.com/btcsuite/btcd v0.20.1-beta // indirect
 	github.com/chenzhuoyu/iasm v0.9.0 // indirect
-	github.com/consensys/bavard v0.1.25 // indirect
-	github.com/consensys/gnark-crypto v0.14.0 // indirect
+	github.com/consensys/bavard v0.1.29 // indirect
+	github.com/consensys/gnark-crypto v0.16.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
 	github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -77,8 +75,8 @@ require (
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
 	golang.org/x/crypto v0.32.0 // indirect
-	golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.29.0 // indirect
+	golang.org/x/sync v0.11.0 // indirect
+	golang.org/x/sys v0.30.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	rsc.io/tmplfunc v0.0.3 // indirect
 )

@@ -32,10 +32,10 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
 github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
 github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
 github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
-github.com/consensys/bavard v0.1.25 h1:5YcSBnp03/HvfpKaIQLr/ecspTp2k8YNR5rQLOWvUyc=
-github.com/consensys/bavard v0.1.25/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
-github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E=
-github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0=
+github.com/consensys/bavard v0.1.29 h1:fobxIYksIQ+ZSrTJUuQgu+HIJwclrAPcdXqd7H2hh1k=
+github.com/consensys/bavard v0.1.29/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
+github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
+github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
 github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
@@ -177,10 +177,10 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5 h1:vZ75srkZCStjDWq/kqZGLoucf7Y7qXC13nKjQVZ0zp8=
-github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e h1:g8jtcGiHbjWYh/V7O245IDho3WfQT4CwEpBV+MhYDrg=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
+github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
+github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
@@ -198,8 +198,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
 github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -252,8 +252,8 @@ golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -269,8 +269,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=

@@ -16,6 +16,8 @@ type ProverManager struct {
 	// Number of attempts that a session can be retried if previous attempts failed.
 	// Currently we only consider proving timeout as failure here.
 	SessionAttempts uint8 `json:"session_attempts"`
+	// Threshold for activating the external prover based on unassigned task count.
+	ExternalProverThreshold int64 `json:"external_prover_threshold"`
 	// Zk verifier config.
 	Verifier *VerifierConfig `json:"verifier"`
 	// BatchCollectionTimeSec batch Proof collection time (in seconds).
@@ -49,7 +51,6 @@ type Config struct {
 
 // CircuitConfig circuit items.
 type CircuitConfig struct {
-	ParamsPath       string `json:"params_path"`
 	AssetsPath       string `json:"assets_path"`
 	ForkName         string `json:"fork_name"`
 	MinProverVersion string `json:"min_prover_version"`
@@ -57,8 +58,6 @@ type CircuitConfig struct {
 
 // VerifierConfig load zk verifier config.
 type VerifierConfig struct {
 	MockMode           bool           `json:"mock_mode"`
-	LowVersionCircuit  *CircuitConfig `json:"low_version_circuit"`
 	HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
 }

@@ -15,15 +15,18 @@ func TestConfig(t *testing.T) {
 		"prover_manager": {
 			"provers_per_session": 1,
 			"session_attempts": 5,
+			"external_prover_threshold": 32,
 			"bundle_collection_time_sec": 180,
 			"batch_collection_time_sec": 180,
 			"chunk_collection_time_sec": 180,
 			"verifier": {
 				"mock_mode": true,
-				"params_path": "",
-				"agg_vk_path": ""
+				"high_version_circuit": {
+					"assets_path": "assets",
+					"fork_name": "euclidV2",
+					"min_prover_version": "v4.4.45"
+				}
 			},
-			"max_verifier_workers": 4,
-			"min_prover_version": "v1.0.0"
+			"max_verifier_workers": 4
 		},
 		"db": {
 			"driver_name": "postgres",

@@ -46,7 +46,7 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
 
 	hardForkNames, err := a.loginLogic.ProverHardForkName(&login)
 	if err != nil {
-		return "", fmt.Errorf("prover hard name failure:%w", err)
+		return "", fmt.Errorf("prover hard fork name failure:%w", err)
 	}
 
 	// check the challenge is used, if used, return failure
@@ -70,10 +70,11 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
 	}
 
 	return jwt.MapClaims{
-		types.HardForkName:  v.HardForkName,
-		types.PublicKey:     v.PublicKey,
-		types.ProverName:    v.Message.ProverName,
-		types.ProverVersion: v.Message.ProverVersion,
+		types.HardForkName:          v.HardForkName,
+		types.PublicKey:             v.PublicKey,
+		types.ProverName:            v.Message.ProverName,
+		types.ProverVersion:         v.Message.ProverVersion,
+		types.ProverProviderTypeKey: v.Message.ProverProviderType,
 	}
 }
@@ -96,5 +97,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
 		c.Set(types.HardForkName, hardForkName)
 	}
 
+	if providerType, ok := claims[types.ProverProviderTypeKey]; ok {
+		c.Set(types.ProverProviderTypeKey, providerType)
+	}
+
 	return nil
 }
@@ -26,7 +26,7 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
 		panic("proof receiver new verifier failure")
 	}
 
-	log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap, "bundleVerifier", vf.BundleVkMap)
+	log.Info("verifier created", "openVmVerifier", vf.OpenVMVkMap)
 
 	Auth = NewAuthController(db, cfg, vf)
 	GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
@@ -21,9 +21,8 @@ import (
 type LoginLogic struct {
 	cfg          *config.Config
 	challengeOrm *orm.Challenge
-	chunkVks     map[string]struct{}
-	batchVKs     map[string]struct{}
-	bundleVks    map[string]struct{}
+
+	openVmVks map[string]struct{}
 
 	proverVersionHardForkMap map[string][]string
 }
@@ -31,25 +30,14 @@ type LoginLogic struct {
 // NewLoginLogic new a LoginLogic
 func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
 	proverVersionHardForkMap := make(map[string][]string)
-	if version.CheckScrollRepoVersion(cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
-		log.Error("config file error, low verifier min_prover_version should not more than high verifier min_prover_version",
-			"low verifier min_prover_version", cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion,
-			"high verifier min_prover_version", cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion)
-		panic("verifier config file error")
-	}
-
 	var highHardForks []string
 	highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
-	highHardForks = append(highHardForks, cfg.ProverManager.Verifier.LowVersionCircuit.ForkName)
 	proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
 
-	proverVersionHardForkMap[cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion] = []string{cfg.ProverManager.Verifier.LowVersionCircuit.ForkName}
-
 	return &LoginLogic{
 		cfg:          cfg,
-		chunkVks:     vf.ChunkVKMap,
-		batchVKs:     vf.BatchVKMap,
-		bundleVks:    vf.BundleVkMap,
+		openVmVks:    vf.OpenVMVkMap,
 		challengeOrm: orm.NewChallenge(db),
 		proverVersionHardForkMap: proverVersionHardForkMap,
 	}
@@ -68,44 +56,38 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
 		return errors.New("auth message verify failure")
 	}
 
-	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion) {
-		return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
-			l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, login.Message.ProverVersion)
+	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
+		return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", l.cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion, login.Message.ProverVersion)
 	}
 
-	if len(login.Message.ProverTypes) > 0 {
-		vks := make(map[string]struct{})
-		for _, proverType := range login.Message.ProverTypes {
-			switch proverType {
-			case types.ProverTypeChunk:
-				for vk := range l.chunkVks {
-					vks[vk] = struct{}{}
-				}
-			case types.ProverTypeBatch:
-				for vk := range l.batchVKs {
-					vks[vk] = struct{}{}
-				}
-				for vk := range l.bundleVks {
-					vks[vk] = struct{}{}
-				}
-			default:
-				log.Error("invalid prover_type", "value", proverType, "prover name", login.Message.ProverName, "prover_version", login.Message.ProverVersion)
-			}
-		}
+	vks := make(map[string]struct{})
+	for vk := range l.openVmVks {
+		vks[vk] = struct{}{}
+	}
 
-		for _, vk := range login.Message.VKs {
-			if _, ok := vks[vk]; !ok {
-				log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
-					"prover_version", login.Message.ProverVersion, "message", login.Message)
-				if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
-					return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
-						version.Version, login.Message.ProverVersion)
-				}
-				// if the prover reports a same prover version
-				return errors.New("incompatible vk. please check your params files or config files")
-			}
-		}
+	for _, vk := range login.Message.VKs {
+		if _, ok := vks[vk]; !ok {
+			log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
+				"prover_version", login.Message.ProverVersion, "message", login.Message)
+			if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
+				return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
+					version.Version, login.Message.ProverVersion)
+			}
+			// if the prover reports a same prover version
+			return errors.New("incompatible vk. please check your params files or config files")
+		}
 	}
 
 	if login.Message.ProverProviderType != types.ProverProviderTypeInternal && login.Message.ProverProviderType != types.ProverProviderTypeExternal {
 		// for backward compatibility, set ProverProviderType as internal
 		if login.Message.ProverProviderType == types.ProverProviderTypeUndefined {
 			login.Message.ProverProviderType = types.ProverProviderTypeInternal
 		} else {
 			log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
 			return errors.New("invalid prover provider type.")
 		}
 	}
 
 	return nil
 }

@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"math/big"
 	"time"
 
 	"github.com/gin-gonic/gin"
@@ -11,6 +12,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/scroll-tech/da-codec/encoding"
 	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/common/hexutil"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/scroll-tech/go-ethereum/params"
 	"gorm.io/gorm"
@@ -22,6 +24,7 @@ import (
 	"scroll-tech/coordinator/internal/config"
 	"scroll-tech/coordinator/internal/orm"
 	coordinatorType "scroll-tech/coordinator/internal/types"
+	cutils "scroll-tech/coordinator/internal/utils"
 )
 
 // BatchProverTask is prover task implement for batch proof
@@ -63,7 +66,20 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 
 	maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
 	maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
+	if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
+		unassignedBatchCount, getCountError := bp.batchOrm.GetUnassignedBatchCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
+		if getCountError != nil {
+			log.Error("failed to get unassigned batch proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
+			return nil, ErrCoordinatorInternalFailure
+		}
+		// Assign external prover if unassigned task number exceeds threshold
+		if unassignedBatchCount < bp.cfg.ProverManager.ExternalProverThreshold {
+			return nil, nil
+		}
+	}
+
 	var batchTask *orm.Batch
+	var hardForkName string
 	for i := 0; i < 5; i++ {
 		var getTaskError error
 		var tmpBatchTask *orm.Batch
@@ -88,6 +104,30 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 			return nil, nil
 		}
 
+		taskCtx.taskType = message.ProofTypeBatch
+		taskCtx.batchTask = tmpBatchTask
+
+		var checkErr error
+		hardForkName, checkErr = bp.hardForkSanityCheck(ctx, taskCtx)
+		if checkErr != nil {
+			log.Debug("hard fork sanity check failed", "height", getTaskParameter.ProverHeight, "err", checkErr)
+			return nil, nil
+		}
+
+		// Don't dispatch the same failing job to the same prover
+		proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
+		if getFailedTaskError != nil {
+			log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
+			return nil, ErrCoordinatorInternalFailure
+		}
+		for i := 0; i < len(proverTasks); i++ {
+			if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
+				taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
+				log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBatchTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
+				return nil, nil
+			}
+		}
+
 		rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
 		if updateAttemptsErr != nil {
 			log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
@@ -109,23 +149,6 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}
 
 	log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
 
-	hardForkName, getHardForkErr := bp.hardForkName(ctx, batchTask)
-	if getHardForkErr != nil {
-		bp.recoverActiveAttempts(ctx, batchTask)
-		log.Error("retrieve hard fork name by batch failed", "task_id", batchTask.Hash, "err", getHardForkErr)
-		return nil, ErrCoordinatorInternalFailure
-	}
-
-	//if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
-	//	bp.recoverActiveAttempts(ctx, batchTask)
-	//	log.Error("incompatible prover version",
-	//		"requisite hard fork name", hardForkName,
-	//		"prover hard fork name", taskCtx.HardForkNames,
-	//		"task_id", batchTask.Hash)
-	//	return nil, ErrCoordinatorInternalFailure
-	//}
-
 	proverTask := orm.ProverTask{
 		TaskID:          batchTask.Hash,
 		ProverPublicKey: taskCtx.PublicKey,
@@ -162,20 +185,6 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	return taskMsg, nil
 }
 
-func (bp *BatchProverTask) hardForkName(ctx *gin.Context, batchTask *orm.Batch) (string, error) {
-	startChunk, getChunkErr := bp.chunkOrm.GetChunkByHash(ctx, batchTask.StartChunkHash)
-	if getChunkErr != nil {
-		return "", getChunkErr
-	}
-
-	l2Block, getBlockErr := bp.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
-	if getBlockErr != nil {
-		return "", getBlockErr
-	}
-	hardForkName := encoding.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
-	return hardForkName, nil
-}
-
 func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, batch *orm.Batch, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
 	// get chunk from db
 	chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID)
@@ -188,30 +197,32 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
 		return nil, fmt.Errorf("no chunk found for batch task id:%s", task.TaskID)
 	}
 
-	var chunkProofs []*message.ChunkProof
+	var chunkProofs []*message.OpenVMChunkProof
 	var chunkInfos []*message.ChunkInfo
 	for _, chunk := range chunks {
-		var proof message.ChunkProof
+		var proof message.OpenVMChunkProof
 		if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
 			return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
 		}
 		chunkProofs = append(chunkProofs, &proof)
 
 		chunkInfo := message.ChunkInfo{
-			ChainID:       bp.cfg.L2.ChainID,
-			PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
-			PostStateRoot: common.HexToHash(chunk.StateRoot),
-			WithdrawRoot:  common.HexToHash(chunk.WithdrawRoot),
-			DataHash:      common.HexToHash(chunk.Hash),
-			IsPadding:     false,
-		}
-		if proof.ChunkInfo != nil {
-			chunkInfo.TxBytes = proof.ChunkInfo.TxBytes
+			ChainID:            bp.cfg.L2.ChainID,
+			PrevStateRoot:      common.HexToHash(chunk.ParentChunkStateRoot),
+			PostStateRoot:      common.HexToHash(chunk.StateRoot),
+			WithdrawRoot:       common.HexToHash(chunk.WithdrawRoot),
+			DataHash:           common.HexToHash(chunk.Hash),
+			PrevMsgQueueHash:   common.HexToHash(chunk.PrevL1MessageQueueHash),
+			PostMsgQueueHash:   common.HexToHash(chunk.PostL1MessageQueueHash),
+			IsPadding:          false,
+			InitialBlockNumber: proof.MetaData.ChunkInfo.InitialBlockNumber,
+			BlockCtxs:          proof.MetaData.ChunkInfo.BlockCtxs,
+			TxDataLength:       proof.MetaData.ChunkInfo.TxDataLength,
 		}
 		chunkInfos = append(chunkInfos, &chunkInfo)
 	}
 
-	taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs)
+	taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs, hardForkName)
 	if err != nil {
 		return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", task.TaskID, err)
 	}
@@ -228,6 +239,9 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
 		TaskData:     string(chunkProofsBytes),
 		HardForkName: hardForkName,
 	}
+
+	log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeBatch.String(), "hard_fork_name", hardForkName, "task_data", taskMsg.TaskData)
+
 	return taskMsg, nil
 }
@@ -237,13 +251,23 @@ func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *or
 	}
 }
 
-func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []*message.ChunkProof) (*message.BatchTaskDetail, error) {
+func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []*message.OpenVMChunkProof, hardForkName string) (*message.BatchTaskDetail, error) {
 	taskDetail := &message.BatchTaskDetail{
 		ChunkInfos:  chunkInfos,
 		ChunkProofs: chunkProofs,
 	}
 
-	if encoding.CodecVersion(dbBatch.CodecVersion) != encoding.CodecV3 && encoding.CodecVersion(dbBatch.CodecVersion) != encoding.CodecV4 {
+	if hardForkName == message.EuclidV2Fork {
+		taskDetail.ForkName = message.EuclidV2ForkNameForProver
+	} else {
+		log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
+		return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
+	}
+
+	dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
+	switch dbBatchCodecVersion {
+	case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7:
+	default:
 		return taskDetail, nil
 	}
@@ -258,6 +282,13 @@ func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*
 	}
 	taskDetail.BatchHeader = batchHeader
 	taskDetail.BlobBytes = dbBatch.BlobBytes
+	taskDetail.ChallengeDigest = common.HexToHash(dbBatch.ChallengeDigest)
+	// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
+	// | z       | y       | kzg_commitment | kzg_proof |
+	// |---------|---------|----------------|-----------|
+	// | bytes32 | bytes32 | bytes48        | bytes48   |
+	taskDetail.KzgProof = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[112:160]))}
+	taskDetail.KzgCommitment = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(dbBatch.BlobDataProof[64:112]))}
 
 	return taskDetail, nil
 }
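
Given the BlobDataProof memory layout documented in the code above, a minimal sketch (hypothetical helper, not in this diff) that names each fixed-width region of the 160-byte prefix:

// splitBlobDataProof slices the regions documented above:
// z (32 bytes) | y (32 bytes) | kzg_commitment (48 bytes) | kzg_proof (48 bytes).
func splitBlobDataProof(p []byte) (z, y, commitment, proof []byte, err error) {
	if len(p) < 160 {
		return nil, nil, nil, nil, fmt.Errorf("blob data proof too short: %d bytes", len(p))
	}
	return p[0:32], p[32:64], p[64:112], p[112:160], nil
}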

@@ -9,18 +9,19 @@ import (
 	"github.com/gin-gonic/gin"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/scroll-tech/da-codec/encoding"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/scroll-tech/go-ethereum/params"
 	"gorm.io/gorm"
 
-	"scroll-tech/common/types"
-	"scroll-tech/common/types/message"
-	"scroll-tech/common/utils"
-
 	"scroll-tech/coordinator/internal/config"
 	"scroll-tech/coordinator/internal/orm"
 	coordinatorType "scroll-tech/coordinator/internal/types"
+	cutils "scroll-tech/coordinator/internal/utils"
+
+	"scroll-tech/common/types"
+	"scroll-tech/common/types/message"
+	"scroll-tech/common/utils"
 )
 
 // BundleProverTask is prover task implement for bundle proof
@@ -63,7 +64,20 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
 
 	maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
 	maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
+	if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
+		unassignedBundleCount, getCountError := bp.bundleOrm.GetUnassignedBundleCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
+		if getCountError != nil {
+			log.Error("failed to get unassigned bundle proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
+			return nil, ErrCoordinatorInternalFailure
+		}
+		// Assign external prover if unassigned task number exceeds threshold
+		if unassignedBundleCount < bp.cfg.ProverManager.ExternalProverThreshold {
+			return nil, nil
+		}
+	}
+
 	var bundleTask *orm.Bundle
+	var hardForkName string
 	for i := 0; i < 5; i++ {
 		var getTaskError error
 		var tmpBundleTask *orm.Bundle
@@ -88,6 +102,30 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
 			return nil, nil
 		}
 
+		taskCtx.taskType = message.ProofTypeBundle
+		taskCtx.bundleTask = tmpBundleTask
+
+		var checkErr error
+		hardForkName, checkErr = bp.hardForkSanityCheck(ctx, taskCtx)
+		if checkErr != nil {
+			log.Debug("hard fork sanity check failed", "height", getTaskParameter.ProverHeight, "err", checkErr)
+			return nil, nil
+		}
+
+		// Don't dispatch the same failing job to the same prover
+		proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
+		if getTaskError != nil {
+			log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
+			return nil, ErrCoordinatorInternalFailure
+		}
+		for i := 0; i < len(proverTasks); i++ {
+			if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
+				taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
+				log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpBundleTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
+				return nil, nil
+			}
+		}
+
 		rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
 		if updateAttemptsErr != nil {
 			log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
@@ -109,23 +147,6 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
 	}
 
 	log.Info("start bundle proof generation session", "task index", bundleTask.Index, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
 
-	hardForkName, getHardForkErr := bp.hardForkName(ctx, bundleTask)
-	if getHardForkErr != nil {
-		bp.recoverActiveAttempts(ctx, bundleTask)
-		log.Error("retrieve hard fork name by bundle failed", "task_id", bundleTask.Hash, "err", getHardForkErr)
-		return nil, ErrCoordinatorInternalFailure
-	}
-
-	//if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
-	//	bp.recoverActiveAttempts(ctx, bundleTask)
-	//	log.Error("incompatible prover version",
-	//		"requisite hard fork name", hardForkName,
-	//		"prover hard fork name", taskCtx.HardForkNames,
-	//		"task_id", bundleTask.Hash)
-	//	return nil, ErrCoordinatorInternalFailure
-	//}
-
 	proverTask := orm.ProverTask{
 		TaskID:          bundleTask.Hash,
 		ProverPublicKey: taskCtx.PublicKey,
@@ -162,26 +183,6 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
 	return taskMsg, nil
 }
 
-func (bp *BundleProverTask) hardForkName(ctx *gin.Context, bundleTask *orm.Bundle) (string, error) {
-	startBatch, getBatchErr := bp.batchOrm.GetBatchByHash(ctx, bundleTask.StartBatchHash)
-	if getBatchErr != nil {
-		return "", getBatchErr
-	}
-
-	startChunk, getChunkErr := bp.chunkOrm.GetChunkByHash(ctx, startBatch.StartChunkHash)
-	if getChunkErr != nil {
-		return "", getChunkErr
-	}
-
-	l2Block, getBlockErr := bp.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
-	if getBlockErr != nil {
-		return "", getBlockErr
-	}
-
-	hardForkName := encoding.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
-	return hardForkName, nil
-}
-
 func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
 	// get bundle from db
 	batches, err := bp.batchOrm.GetBatchesByBundleHash(ctx, task.TaskID)
@@ -194,9 +195,14 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
 		return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
 	}
 
+	parentBatch, err := bp.batchOrm.GetBatchByHash(ctx, batches[0].ParentBatchHash)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get parent batch for batch task id:%s err:%w", task.TaskID, err)
+	}
+
-	var batchProofs []*message.BatchProof
+	var batchProofs []*message.OpenVMBatchProof
 	for _, batch := range batches {
-		var proof message.BatchProof
+		var proof message.OpenVMBatchProof
 		if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
 			return nil, fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, task.TaskID, batch.Hash)
 		}
@@ -207,6 +213,24 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
 		BatchProofs: batchProofs,
 	}
 
+	if hardForkName == message.EuclidV2Fork {
+		taskDetail.ForkName = message.EuclidV2ForkNameForProver
+	} else {
+		log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
+		return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
+	}
+
+	taskDetail.BundleInfo = &message.OpenVMBundleInfo{
+		ChainID:       bp.cfg.L2.ChainID,
+		PrevStateRoot: common.HexToHash(parentBatch.StateRoot),
+		PostStateRoot: common.HexToHash(batches[len(batches)-1].StateRoot),
+		WithdrawRoot:  common.HexToHash(batches[len(batches)-1].WithdrawRoot),
+		NumBatches:    uint32(len(batches)),
+		PrevBatchHash: common.HexToHash(batches[0].ParentBatchHash),
+		BatchHash:     common.HexToHash(batches[len(batches)-1].Hash),
+		MsgQueueHash:  common.HexToHash(batches[len(batches)-1].PostL1MessageQueueHash),
+	}
+
 	batchProofsBytes, err := json.Marshal(taskDetail)
 	if err != nil {
 		return nil, fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", task.TaskID, err)
@@ -219,6 +243,9 @@ func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.Prov
 		TaskData:     string(batchProofsBytes),
 		HardForkName: hardForkName,
 	}
+
+	log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeBundle.String(), "hard_fork_name", hardForkName, "task_data", taskMsg.TaskData)
+
 	return taskMsg, nil
 }

@@ -9,7 +9,7 @@ import (
 	"github.com/gin-gonic/gin"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
-	"github.com/scroll-tech/da-codec/encoding"
+	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/scroll-tech/go-ethereum/params"
 	"gorm.io/gorm"
@@ -21,6 +21,7 @@ import (
 	"scroll-tech/coordinator/internal/config"
 	"scroll-tech/coordinator/internal/orm"
 	coordinatorType "scroll-tech/coordinator/internal/types"
+	cutils "scroll-tech/coordinator/internal/utils"
 )
 
 // ChunkProverTask the chunk prover task
@@ -61,7 +62,20 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 
 	maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
 	maxTotalAttempts := cp.cfg.ProverManager.SessionAttempts
+	if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
+		unassignedChunkCount, getCountError := cp.chunkOrm.GetUnassignedChunkCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
+		if getCountError != nil {
+			log.Error("failed to get unassigned chunk proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
+			return nil, ErrCoordinatorInternalFailure
+		}
+		// Assign external prover if unassigned task number exceeds threshold
+		if unassignedChunkCount < cp.cfg.ProverManager.ExternalProverThreshold {
+			return nil, nil
+		}
+	}
+
 	var chunkTask *orm.Chunk
+	var hardForkName string
 	for i := 0; i < 5; i++ {
 		var getTaskError error
 		var tmpChunkTask *orm.Chunk
@@ -86,6 +100,30 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 			return nil, nil
 		}
 
+		taskCtx.taskType = message.ProofTypeChunk
+		taskCtx.chunkTask = tmpChunkTask
+
+		var checkErr error
+		hardForkName, checkErr = cp.hardForkSanityCheck(ctx, taskCtx)
+		if checkErr != nil {
+			log.Debug("hard fork sanity check failed", "height", getTaskParameter.ProverHeight, "err", checkErr)
+			return nil, nil
+		}
+
+		// Don't dispatch the same failing job to the same prover
+		proverTasks, getFailedTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
+		if getFailedTaskError != nil {
+			log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getFailedTaskError)
+			return nil, ErrCoordinatorInternalFailure
+		}
+		for i := 0; i < len(proverTasks); i++ {
+			if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
+				taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
+				log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight, "task ID", tmpChunkTask.Hash, "prover name", taskCtx.ProverName, "prover public key", taskCtx.PublicKey)
+				return nil, nil
+			}
+		}
+
 		rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
 		if updateAttemptsErr != nil {
 			log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
@@ -107,23 +145,6 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}
 
 	log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
 
-	hardForkName, getHardForkErr := cp.hardForkName(ctx, chunkTask)
-	if getHardForkErr != nil {
-		cp.recoverActiveAttempts(ctx, chunkTask)
-		log.Error("retrieve hard fork name by chunk failed", "task_id", chunkTask.Hash, "err", getHardForkErr)
-		return nil, ErrCoordinatorInternalFailure
-	}
-
-	//if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
-	//	cp.recoverActiveAttempts(ctx, chunkTask)
-	//	log.Error("incompatible prover version",
-	//		"requisite hard fork name", hardForkName,
-	//		"prover hard fork name", taskCtx.HardForkNames,
-	//		"task_id", chunkTask.Hash)
-	//	return nil, ErrCoordinatorInternalFailure
-	//}
-
 	proverTask := orm.ProverTask{
 		TaskID:          chunkTask.Hash,
 		ProverPublicKey: taskCtx.PublicKey,
@@ -142,7 +163,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 		return nil, ErrCoordinatorInternalFailure
 	}
 
-	taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
+	taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, chunkTask, hardForkName)
 	if err != nil {
 		cp.recoverActiveAttempts(ctx, chunkTask)
 		log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
@@ -159,26 +180,28 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	return taskMsg, nil
 }
 
-func (cp *ChunkProverTask) hardForkName(ctx *gin.Context, chunkTask *orm.Chunk) (string, error) {
-	l2Block, getBlockErr := cp.blockOrm.GetL2BlockByNumber(ctx.Copy(), chunkTask.StartBlockNumber)
-	if getBlockErr != nil {
-		return "", getBlockErr
-	}
-	hardForkName := encoding.GetHardforkName(cp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
-	return hardForkName, nil
-}
-
-func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
+func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, chunk *orm.Chunk, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
 	// Get block hashes.
 	blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
 	if dbErr != nil || len(blockHashes) == 0 {
 		return nil, fmt.Errorf("failed to fetch block hashes of a chunk, chunk hash:%s err:%w", task.TaskID, dbErr)
 	}
 
+	var taskDetailBytes []byte
 	taskDetail := message.ChunkTaskDetail{
-		BlockHashes: blockHashes,
+		BlockHashes:      blockHashes,
+		PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
 	}
-	blockHashesBytes, err := json.Marshal(taskDetail)
+
+	if hardForkName == message.EuclidV2Fork {
+		taskDetail.ForkName = message.EuclidV2ForkNameForProver
+	} else {
+		log.Error("unsupported hard fork name", "hard_fork_name", hardForkName)
+		return nil, fmt.Errorf("unsupported hard fork name: %s", hardForkName)
+	}
+
+	var err error
+	taskDetailBytes, err = json.Marshal(taskDetail)
 	if err != nil {
 		return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", task.TaskID, err)
 	}
@@ -187,10 +210,12 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
 		UUID:         task.UUID.String(),
 		TaskID:       task.TaskID,
 		TaskType:     int(message.ProofTypeChunk),
-		TaskData:     string(blockHashesBytes),
+		TaskData:     string(taskDetailBytes),
 		HardForkName: hardForkName,
 	}
 
+	log.Debug("TaskData", "task_id", task.TaskID, "task_type", message.ProofTypeChunk.String(), "hard_fork_name", hardForkName, "task_data", proverTaskSchema.TaskData)
+
 	return proverTaskSchema, nil
 }

@@ -9,9 +9,12 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
@@ -47,10 +50,81 @@ type BaseProverTask struct {
|
||||
}
|
||||
|
||||
type proverTaskContext struct {
|
||||
PublicKey string
|
||||
ProverName string
|
||||
ProverVersion string
|
||||
HardForkNames map[string]struct{}
|
||||
PublicKey string
|
||||
ProverName string
|
||||
ProverVersion string
|
||||
ProverProviderType uint8
|
||||
HardForkNames map[string]struct{}
|
||||
|
||||
taskType message.ProofType
|
||||
chunkTask *orm.Chunk
|
||||
batchTask *orm.Batch
|
||||
bundleTask *orm.Bundle
|
||||
}
|
||||
|
||||
// hardForkName gets the chunk/batch/bundle hard fork name
func (b *BaseProverTask) hardForkName(ctx *gin.Context, taskCtx *proverTaskContext) (string, error) {
switch {
case taskCtx.taskType == message.ProofTypeChunk:
if taskCtx.chunkTask == nil {
return "", errors.New("chunk task is nil")
}
l2Block, getBlockErr := b.blockOrm.GetL2BlockByNumber(ctx.Copy(), taskCtx.chunkTask.StartBlockNumber)
if getBlockErr != nil {
return "", getBlockErr
}
hardForkName := encoding.GetHardforkName(b.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
return hardForkName, nil

case taskCtx.taskType == message.ProofTypeBatch:
if taskCtx.batchTask == nil {
return "", errors.New("batch task is nil")
}
startChunk, getChunkErr := b.chunkOrm.GetChunkByHash(ctx, taskCtx.batchTask.StartChunkHash)
if getChunkErr != nil {
return "", getChunkErr
}
l2Block, getBlockErr := b.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
if getBlockErr != nil {
return "", getBlockErr
}
hardForkName := encoding.GetHardforkName(b.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
return hardForkName, nil

case taskCtx.taskType == message.ProofTypeBundle:
if taskCtx.bundleTask == nil {
return "", errors.New("bundle task is nil")
}
startBatch, getBatchErr := b.batchOrm.GetBatchByHash(ctx, taskCtx.bundleTask.StartBatchHash)
if getBatchErr != nil {
return "", getBatchErr
}
startChunk, getChunkErr := b.chunkOrm.GetChunkByHash(ctx, startBatch.StartChunkHash)
if getChunkErr != nil {
return "", getChunkErr
}
l2Block, getBlockErr := b.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
if getBlockErr != nil {
return "", getBlockErr
}
hardForkName := encoding.GetHardforkName(b.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
return hardForkName, nil
default:
return "", errors.New("illegal task type")
}
}

// hardForkSanityCheck checks that the task's hard fork name matches one the prover supports
func (b *BaseProverTask) hardForkSanityCheck(ctx *gin.Context, taskCtx *proverTaskContext) (string, error) {
hardForkName, getHardForkErr := b.hardForkName(ctx, taskCtx)
if getHardForkErr != nil {
return "", getHardForkErr
}

if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
return "", fmt.Errorf("to be assigned prover task's hard-fork name is not the same as prover, proverName: %s, proverVersion: %s, proverSupportHardForkNames: %s, taskHardForkName: %v", taskCtx.ProverName, taskCtx.ProverVersion, taskCtx.HardForkNames, hardForkName)
}
return hardForkName, nil
}
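
The sanity check above reduces to set membership over the fork names the prover registered at login. A tiny self-contained illustration (the fork names are examples):

package main

import "fmt"

func main() {
	// HardForkNames from the login acts as a set; a task is assigned only
	// when its resolved fork name is a member.
	supported := map[string]struct{}{"euclidV2": {}}
	for _, taskFork := range []string{"euclidV2", "darwinV2"} {
		if _, ok := supported[taskFork]; ok {
			fmt.Println(taskFork, "-> assign")
		} else {
			fmt.Println(taskFork, "-> refuse")
		}
	}
}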

// checkParameter checks whether the prover task parameters are legal
@@ -76,6 +150,13 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
}
ptc.ProverVersion = proverVersion.(string)

ProverProviderType, ProverProviderTypeExist := ctx.Get(coordinatorType.ProverProviderTypeKey)
if !ProverProviderTypeExist {
// for backward compatibility, set ProverProviderType as internal
ProverProviderType = float64(coordinatorType.ProverProviderTypeInternal)
}
ptc.ProverProviderType = uint8(ProverProviderType.(float64))

hardForkNamesStr, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, errors.New("get hard fork name from context failed")
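
The float64 assertion above is not arbitrary: claims restored from a JWT pass through encoding/json, and JSON numbers unmarshal into interface{} as float64. A minimal demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var claims map[string]interface{}
	if err := json.Unmarshal([]byte(`{"prover_provider_type": 1}`), &claims); err != nil {
		panic(err)
	}
	// The number is a float64 after unmarshaling, never an int, which is
	// why the coordinator asserts float64 before narrowing to uint8.
	providerType := uint8(claims["prover_provider_type"].(float64))
	fmt.Println(providerType) // 1
}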

@@ -171,23 +171,23 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor

switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:
var chunkProof message.ChunkProof
chunkProof := &message.OpenVMChunkProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &chunkProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyChunkProof(&chunkProof, hardForkName)
success, verifyErr = m.verifier.VerifyChunkProof(chunkProof, hardForkName)
case message.ProofTypeBatch:
var batchProof message.BatchProof
batchProof := &message.OpenVMBatchProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyBatchProof(&batchProof, hardForkName)
success, verifyErr = m.verifier.VerifyBatchProof(batchProof, hardForkName)
case message.ProofTypeBundle:
var bundleProof message.BundleProof
bundleProof := &message.OpenVMBundleProof{}
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyBundleProof(&bundleProof, hardForkName)
success, verifyErr = m.verifier.VerifyBundleProof(bundleProof, hardForkName)
}

if verifyErr != nil || !success {
@@ -265,7 +265,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())

if proofParameter.Status != int(message.StatusOk) {
if proofParameter.Status != int(coordinatorType.StatusOk) {
// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)

@@ -10,31 +10,26 @@ import (

// NewVerifier Sets up a mock verifier.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
batchVKMap := map[string]struct{}{"mock_vk": {}}
chunkVKMap := map[string]struct{}{"mock_vk": {}}
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
return &Verifier{cfg: cfg, OpenVMVkMap: map[string]struct{}{"mock_vk": {}}}, nil
}

// VerifyChunkProof returns a mock verification result for a ChunkProof.
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}

// VerifyBatchProof returns a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
if proof.VmProof != nil && string(proof.VmProof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}

// VerifyBundleProof returns a mock verification result for a BundleProof.
func (v *Verifier) VerifyBundleProof(proof *message.BundleProof, forkName string) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
return true, nil
}

@@ -7,10 +7,8 @@ import (
// InvalidTestProof invalid proof used in tests
const InvalidTestProof = "this is a invalid proof"

// Verifier represents a rust ffi to a halo2 verifier.
// Verifier represents a rust ffi to a verifier.
type Verifier struct {
cfg *config.VerifierConfig
ChunkVKMap map[string]struct{}
BatchVKMap map[string]struct{}
BundleVkMap map[string]struct{}
OpenVMVkMap map[string]struct{}
}

@@ -30,14 +30,12 @@ import (
// in `*config.CircuitConfig` being changed
type rustCircuitConfig struct {
ForkName string `json:"fork_name"`
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
}

func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
return &rustCircuitConfig{
ForkName: cfg.ForkName,
ParamsPath: cfg.ParamsPath,
AssetsPath: cfg.AssetsPath,
}
}
@@ -46,25 +44,23 @@ func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
// Defining a brand new struct here eliminates side effects in case fields
// in `*config.VerifierConfig` are changed
type rustVerifierConfig struct {
LowVersionCircuit *rustCircuitConfig `json:"low_version_circuit"`
HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
}

func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
return &rustVerifierConfig{
LowVersionCircuit: newRustCircuitConfig(cfg.LowVersionCircuit),
HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
}
}

type rustVkDump struct {
Chunk string `json:"chunk_vk"`
Batch string `json:"batch_vk"`
Bundle string `json:"bundle_vk"`
}

// NewVerifier Sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode {
chunkVKMap := map[string]struct{}{"mock_vk": {}}
batchVKMap := map[string]struct{}{"mock_vk": {}}
bundleVKMap := map[string]struct{}{"mock_vk": {}}
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap, BundleVkMap: bundleVKMap}, nil
}
verifierConfig := newRustVerifierConfig(cfg)
configBytes, err := json.Marshal(verifierConfig)
if err != nil {
@@ -80,45 +76,18 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {

v := &Verifier{
cfg: cfg,
ChunkVKMap: make(map[string]struct{}),
BatchVKMap: make(map[string]struct{}),
BundleVkMap: make(map[string]struct{}),
OpenVMVkMap: make(map[string]struct{}),
}

bundleVK, err := v.readVK(path.Join(cfg.HighVersionCircuit.AssetsPath, "vk_bundle.vkey"))
if err != nil {
return nil, err
}
batchVK, err := v.readVK(path.Join(cfg.HighVersionCircuit.AssetsPath, "vk_batch.vkey"))
if err != nil {
return nil, err
}
chunkVK, err := v.readVK(path.Join(cfg.HighVersionCircuit.AssetsPath, "vk_chunk.vkey"))
if err != nil {
return nil, err
}
v.BundleVkMap[bundleVK] = struct{}{}
v.BatchVKMap[batchVK] = struct{}{}
v.ChunkVKMap[chunkVK] = struct{}{}

if err := v.loadLowVersionVKs(cfg); err != nil {
if err := v.loadOpenVMVks(message.EuclidV2Fork); err != nil {
return nil, err
}

v.loadCurieVersionVKs()
return v, nil
}

// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, batch verifier disabled")
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil

}
// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Verifier.
func (v *Verifier) VerifyBatchProof(proof *message.OpenVMBatchProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
@@ -136,16 +105,8 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string)
return verified != 0, nil
}

// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled")
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil

}
// VerifyChunkProof Verify a ZkProof by marshaling it and sending it to the Verifier.
func (v *Verifier) VerifyChunkProof(proof *message.OpenVMChunkProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
@@ -164,15 +125,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string)
}

// VerifyBundleProof Verify a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier.
func (v *Verifier) VerifyBundleProof(proof *message.BundleProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled")
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil

}
func (v *Verifier) VerifyBundleProof(proof *message.OpenVMBundleProof, forkName string) (bool, error) {
buf, err := json.Marshal(proof)
if err != nil {
return false, err
@@ -202,27 +155,36 @@ func (v *Verifier) readVK(filePat string) (string, error) {
return base64.StdEncoding.EncodeToString(byt), nil
}

// load low version vks, current is darwin
func (v *Verifier) loadLowVersionVKs(cfg *config.VerifierConfig) error {
bundleVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_bundle.vkey"))
func (v *Verifier) loadOpenVMVks(forkName string) error {
tempFile := path.Join(os.TempDir(), "openVmVk.json")
defer func() {
if err := os.Remove(tempFile); err != nil {
log.Error("failed to remove temp file", "err", err)
}
}()

forkNameCStr := C.CString(forkName)
defer C.free(unsafe.Pointer(forkNameCStr))
tempFileCStr := C.CString(tempFile)
defer C.free(unsafe.Pointer(tempFileCStr))

C.dump_vk(forkNameCStr, tempFileCStr)

f, err := os.Open(tempFile)
if err != nil {
return err
}
batchVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_batch.vkey"))
byt, err := io.ReadAll(f)
if err != nil {
return err
}
chunkVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_chunk.vkey"))
if err != nil {

var dump rustVkDump
if err := json.Unmarshal(byt, &dump); err != nil {
return err
}
v.BundleVkMap[bundleVK] = struct{}{}
v.BatchVKMap[batchVK] = struct{}{}
v.ChunkVKMap[chunkVK] = struct{}{}
v.OpenVMVkMap[dump.Chunk] = struct{}{}
v.OpenVMVkMap[dump.Batch] = struct{}{}
v.OpenVMVkMap[dump.Bundle] = struct{}{}
return nil
}
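
loadOpenVMVks relies on dump_vk writing a JSON object in the shape of rustVkDump. A self-contained parsing sketch; the base64 strings are placeholders, not real verifying keys:

package main

import (
	"encoding/json"
	"fmt"
)

type rustVkDump struct {
	Chunk  string `json:"chunk_vk"`
	Batch  string `json:"batch_vk"`
	Bundle string `json:"bundle_vk"`
}

func main() {
	// Placeholder payload in the shape dump_vk is expected to write.
	raw := []byte(`{"chunk_vk":"Y2h1bms=","batch_vk":"YmF0Y2g=","bundle_vk":"YnVuZGxl"}`)
	var dump rustVkDump
	if err := json.Unmarshal(raw, &dump); err != nil {
		panic(err)
	}
	// All three keys land in a single set, mirroring OpenVMVkMap.
	vkSet := map[string]struct{}{dump.Chunk: {}, dump.Batch: {}, dump.Bundle: {}}
	fmt.Println(len(vkSet)) // 3
}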

func (v *Verifier) loadCurieVersionVKs() {
v.BatchVKMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD9jfGkei+f0wNYpkjW7JO12EfU7CjYVBo+PGku3zaQJI64lbn6BwyTBa4RfrPFpV5mP47ix0sXZ+Wt5wklMLRW7OIJb1yfCDm+gkSsp3/Zqrxt4SY4rQ4WtHfynTCQ0KDi78jNuiFvwxO3ub3DkgGVaxMkGxTRP/Vz6E7MCZMUBR5wZFcMzJn+73f0wYjDxfj00krg9O1VrwVxbVV1ycLR6oQLcOgm/l+xwth8io0vDpF9OY21gD5DgJn9GgcYe8KoRVEbEqApLZPdBibpcSMTY9czZI2LnFcqrDDmYvhEwgjhZrsTog2xLXOODoOupZ/is5ekQ9Gi0y871b1mLlCGA="] = struct{}{}
v.ChunkVKMap["AAAAGQAAAATyWEABRbJ6hQQ5/zLX1gTasr7349minA9rSgMS6gDeHwZKqikRiO3md+pXjjxMHnKQtmXYgMXhJSvlmZ+Ws+cheuly2X1RuNQzcZuRImaKPR9LJsVZYsXfJbuqdKX8p0Gj8G83wMJOmTzNVUyUol0w0lTU+CEiTpHOnxBsTF3EWaW3s1u4ycOgWt1c9M6s7WmaBZLYgAWYCunO5CLCLApNGbCASeck/LuSoedEri5u6HccCKU2khG6zl6W07jvYSbDVLJktbjRiHv+/HQix+K14j8boo8Z/unhpwXCsPxkQA=="] = struct{}{}
}

@@ -11,7 +11,7 @@ import (

"github.com/stretchr/testify/assert"

"scroll-tech/common/types/message"
"scroll-tech/common/types"

"scroll-tech/coordinator/internal/config"
)
@@ -29,17 +29,9 @@ func TestFFI(t *testing.T) {
as := assert.New(t)

cfg := &config.VerifierConfig{
MockMode: false,
LowVersionCircuit: &config.CircuitConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPathLo,
ForkName: "darwin",
MinProverVersion: "",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPathHi,
ForkName: "darwinV2",
ForkName: "euclidV2",
MinProverVersion: "",
},
}
@@ -48,43 +40,43 @@ func TestFFI(t *testing.T) {
as.NoError(err)

chunkProof1 := readChunkProof(*chunkProofPath1, as)
chunkOk1, err := v.VerifyChunkProof(chunkProof1, "darwinV2")
chunkOk1, err := v.VerifyChunkProof(chunkProof1, "euclidV2")
as.NoError(err)
as.True(chunkOk1)
t.Log("Verified chunk proof 1")

chunkProof2 := readChunkProof(*chunkProofPath2, as)
chunkOk2, err := v.VerifyChunkProof(chunkProof2, "darwinV2")
chunkOk2, err := v.VerifyChunkProof(chunkProof2, "euclidV2")
as.NoError(err)
as.True(chunkOk2)
t.Log("Verified chunk proof 2")

batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof, "darwinV2")
batchOk, err := v.VerifyBatchProof(batchProof, "euclidV2")
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")
}

func readBatchProof(filePat string, as *assert.Assertions) *message.BatchProof {
func readBatchProof(filePat string, as *assert.Assertions) *types.OpenVMBatchProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)

proof := &message.BatchProof{}
proof := &types.OpenVMBatchProof{}
as.NoError(json.Unmarshal(byt, proof))

return proof
}

func readChunkProof(filePat string, as *assert.Assertions) *message.ChunkProof {
func readChunkProof(filePat string, as *assert.Assertions) *types.OpenVMChunkProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)

proof := &message.ChunkProof{}
proof := &types.OpenVMChunkProof{}
as.NoError(json.Unmarshal(byt, proof))

return proof

@@ -19,20 +19,23 @@ type Batch struct {
db *gorm.DB `gorm:"column:-"`

// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
EndChunkHash string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
PrevL1MessageQueueHash string `json:"prev_l1_message_queue_hash" gorm:"column:prev_l1_message_queue_hash"`
PostL1MessageQueueHash string `json:"post_l1_message_queue_hash" gorm:"column:post_l1_message_queue_hash"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
ChallengeDigest string `json:"challenge_digest" gorm:"column:challenge_digest"`

// proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
@@ -83,7 +86,7 @@ func (*Batch) TableName() string {
func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
var batch Batch
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND codec_version != 5 AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady))
err := db.Raw(sql).Scan(&batch).Error
if err != nil {
@@ -95,6 +98,23 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTo
return &batch, nil
}

// GetUnassignedBatchCount retrieves unassigned batch count.
func (o *Batch) GetUnassignedBatchCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (int64, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
db = db.Where("codec_version != 5")
db = db.Where("batch.deleted_at IS NULL")
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Batch.GetUnassignedBatchCount error: %w", err)
}
return count, nil
}

// GetAssignedBatch retrieves assigned batch based on the specified limit.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetAssignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {

@@ -59,7 +59,7 @@ func (*Bundle) TableName() string {
func (o *Bundle) GetUnassignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {
var bundle Bundle
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;",
sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND codec_version != 5 AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.BatchProofsStatusReady))
err := db.Raw(sql).Scan(&bundle).Error
if err != nil {
@@ -71,6 +71,23 @@ func (o *Bundle) GetUnassignedBundle(ctx context.Context, maxActiveAttempts, max
return &bundle, nil
}

// GetUnassignedBundleCount retrieves unassigned bundle count.
func (o *Bundle) GetUnassignedBundleCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (int64, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("batch_proofs_status = ?", int(types.BatchProofsStatusReady))
db = db.Where("codec_version != 5")
db = db.Where("bundle.deleted_at IS NULL")
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Bundle.GetUnassignedBundleCount error: %w", err)
}
return count, nil
}

// GetAssignedBundle retrieves assigned bundle based on the specified limit.
// The returned bundles are sorted in ascending order by their index.
func (o *Bundle) GetAssignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {

@@ -2,7 +2,6 @@ package orm

import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
@@ -12,7 +11,6 @@ import (
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)

@@ -30,6 +28,8 @@ type Chunk struct {
StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"`
TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
TotalL1MessagesPoppedInChunk uint64 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
PrevL1MessageQueueHash string `json:"prev_l1_message_queue_hash" gorm:"column:prev_l1_message_queue_hash"`
PostL1MessageQueueHash string `json:"post_l1_message_queue_hash" gorm:"column:post_l1_message_queue_hash"`
ParentChunkHash string `json:"parent_chunk_hash" gorm:"column:parent_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
ParentChunkStateRoot string `json:"parent_chunk_state_root" gorm:"column:parent_chunk_state_root"`
@@ -76,7 +76,7 @@ func (*Chunk) TableName() string {
func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (*Chunk, error) {
var chunk Chunk
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND end_block_number <= %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND end_block_number <= %d AND codec_version != 5 AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, height)
err := db.Raw(sql).Scan(&chunk).Error
if err != nil {
@@ -88,6 +88,23 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTo
return &chunk, nil
}

// GetUnassignedChunkCount retrieves unassigned chunk count.
func (o *Chunk) GetUnassignedChunkCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (int64, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("end_block_number <= ?", height)
db = db.Where("codec_version != 5")
db = db.Where("chunk.deleted_at IS NULL")
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Chunk.GetUnassignedChunkCount error: %w", err)
}
return count, nil
}
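
For readability, the chained conditions above AND together into roughly the following count query; this is an approximation of the SQL GORM generates, shown here as a Go constant, not captured from query logs:

package orm

// unassignedChunkCountSQL approximates the statement the builder above
// produces; it is illustrative only.
const unassignedChunkCountSQL = `
SELECT count(*) FROM chunk
WHERE proving_status = $1
  AND total_attempts < $2
  AND active_attempts < $3
  AND end_block_number <= $4
  AND codec_version != 5
  AND chunk.deleted_at IS NULL`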

// GetAssignedChunk retrieves assigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetAssignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (*Chunk, error) {
@@ -120,32 +137,6 @@ func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*
return chunks, nil
}

// GetProofsByBatchHash retrieves the proofs associated with a specific batch hash.
// It returns a slice of decoded proofs (message.ChunkProof) obtained from the database.
// The returned proofs are sorted in ascending order by their associated chunk index.
func (o *Chunk) GetProofsByBatchHash(ctx context.Context, batchHash string) ([]*message.ChunkProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash", batchHash)
db = db.Order("index ASC")

var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash error: %w, batch hash: %v", err, batchHash)
}

var proofs []*message.ChunkProof
for _, chunk := range chunks {
var proof message.ChunkProof
if err := json.Unmarshal(chunk.Proof, &proof); err != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", err, batchHash, chunk.Hash)
}
proofs = append(proofs, &proof)
}

return proofs, nil
}

// getLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) getLatestChunk(ctx context.Context) (*Chunk, error) {
db := o.db.WithContext(ctx)

@@ -9,11 +9,12 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"

"scroll-tech/database/migrate"

"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
)

var (

@@ -117,6 +117,27 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType messag
return proverTasks, nil
}

// GetFailedProverTasksByHash retrieves the failed ProverTask records associated with the specified hash.
// The returned prover task objects are sorted in descending order by their ids.
func (o *ProverTask) GetFailedProverTasksByHash(ctx context.Context, taskType message.ProofType, hash string, limit int) ([]*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", hash)
db = db.Where("proving_status = ?", int(types.ProverProofInvalid))
db = db.Order("id desc")

if limit != 0 {
db = db.Limit(limit)
}

var proverTasks []*ProverTask
if err := db.Find(&proverTasks).Error; err != nil {
return nil, fmt.Errorf("ProverTask.GetFailedProverTasksByHash error: %w, hash: %v", err, hash)
}
return proverTasks, nil
}

// GetProverTaskByUUIDAndPublicKey gets the prover task by uuid and public key
func (o *ProverTask) GetProverTaskByUUIDAndPublicKey(ctx context.Context, uuid, publicKey string) (*ProverTask, error) {
db := o.db.WithContext(ctx)

@@ -18,6 +18,8 @@ const (
ProverName = "prover_name"
// ProverVersion the prover version for context
ProverVersion = "prover_version"
// ProverProviderTypeKey the prover provider type for context
ProverProviderTypeKey = "prover_provider_type"
// HardForkName the hard fork name for context
HardForkName = "hard_fork_name"
)
@@ -28,13 +30,22 @@ type LoginSchema struct {
Token string `json:"token"`
}

type MessageWithoutProverProviderType struct {
Challenge string `json:"challenge"`
ProverVersion string `json:"prover_version"`
ProverName string `json:"prover_name"`
ProverTypes []ProverType `json:"prover_types"`
VKs []string `json:"vks"`
}

// Message the login message struct
type Message struct {
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
ProverTypes []ProverType `form:"prover_types" json:"prover_types"`
VKs []string `form:"vks" json:"vks"`
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
ProverProviderType ProverProviderType `form:"prover_provider_type" json:"prover_provider_type,omitempty"`
ProverTypes []ProverType `form:"prover_types" json:"prover_types"`
VKs []string `form:"vks" json:"vks"`
}
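
Because the new ProverProviderType field carries omitempty, an old prover that never sets it produces the same JSON as the previous Message layout. A minimal sketch with a simplified local mirror of the struct (tags copied from above, ProverType narrowed to uint8 purely for brevity):

package main

import (
	"encoding/json"
	"fmt"
)

// loginMessage is a simplified local mirror of the Message struct above.
type loginMessage struct {
	Challenge          string   `json:"challenge"`
	ProverVersion      string   `json:"prover_version"`
	ProverName         string   `json:"prover_name"`
	ProverProviderType uint8    `json:"prover_provider_type,omitempty"`
	ProverTypes        []uint8  `json:"prover_types"`
	VKs                []string `json:"vks"`
}

func main() {
	legacy, _ := json.Marshal(loginMessage{Challenge: "c", ProverVersion: "v0.0.1", ProverName: "p"})
	// prover_provider_type is omitted for the zero value, so a legacy
	// payload looks exactly as it did before the field existed.
	fmt.Println(string(legacy))
}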

// LoginParameterWithHardForkName constructs new payload for login
@@ -53,7 +64,7 @@ type LoginParameter struct {
// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *LoginParameter) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Message.Hash()
hash, err := Hash(a.Message)
if err != nil {
return err
}
@@ -70,7 +81,14 @@ func (a *LoginParameter) SignWithKey(priv *ecdsa.PrivateKey) error {

// Verify verifies the message of auth.
func (a *LoginParameter) Verify() (bool, error) {
hash, err := a.Message.Hash()
var hash []byte
var err error
if a.Message.ProverProviderType == ProverProviderTypeUndefined {
// for backward compatibility, calculate hash without ProverProviderType
hash, err = Hash(a.Message.ToMessageWithoutProverProviderType())
} else {
hash, err = Hash(a.Message)
}
if err != nil {
return false, err
}
@@ -85,15 +103,14 @@ func (a *LoginParameter) Verify() (bool, error) {
return isValid, nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Message) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
func (m *Message) ToMessageWithoutProverProviderType() MessageWithoutProverProviderType {
return MessageWithoutProverProviderType{
Challenge: m.Challenge,
ProverVersion: m.ProverVersion,
ProverName: m.ProverName,
ProverTypes: m.ProverTypes,
VKs: m.VKs,
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}

// DecodeAndUnmarshalPubkey decodes a hex-encoded public key and unmarshals it into an ecdsa.PublicKey
@@ -111,3 +128,14 @@ func (i *Message) DecodeAndUnmarshalPubkey(pubKeyHex string) (*ecdsa.PublicKey,
}
return pubKey, nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func Hash(i interface{}) ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
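
Hash is the same RLP-encode-then-Keccak256 routine the old Message method used, generalized to any value so both Message and MessageWithoutProverProviderType can flow through it. A self-contained sketch using the same go-ethereum helpers; the payload struct here is a stand-in, not the real message type:

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// payload is a stand-in for any RLP-encodable auth message.
type payload struct {
	Challenge  string
	ProverName string
}

func main() {
	byt, err := rlp.EncodeToBytes(payload{Challenge: "abcdef", ProverName: "test"})
	if err != nil {
		panic(err)
	}
	// Keccak256 over the RLP encoding, exactly as Hash above computes it.
	fmt.Printf("%x\n", crypto.Keccak256Hash(byt))
}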

@@ -18,11 +18,12 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
t.Run("sign", func(t *testing.T) {
authMsg = LoginParameter{
Message: Message{
ProverName: "test1",
ProverVersion: "v0.0.1",
Challenge: "abcdef",
ProverTypes: []ProverType{ProverTypeBatch},
VKs: []string{"vk1", "vk2"},
ProverName: "test1",
ProverVersion: "v0.0.1",
Challenge: "abcdef",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeOpenVM},
VKs: []string{"vk1", "vk2"},
},
PublicKey: publicKeyHex,
}
@@ -59,11 +60,12 @@ func TestGenerateSignature(t *testing.T) {

authMsg := LoginParameter{
Message: Message{
ProverName: "test",
ProverVersion: "v4.4.45-37af5ef5-38a68e2-1c5093c",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
ProverTypes: []ProverType{ProverTypeChunk},
VKs: []string{"mock_vk"},
ProverName: "test",
ProverVersion: "v4.4.45-37af5ef5-38a68e2-1c5093c",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeOpenVM},
VKs: []string{"mock_vk"},
},
PublicKey: publicKeyHex,
}

@@ -6,15 +6,27 @@ import (
"scroll-tech/common/types/message"
)

// RespStatus represents status code from prover to scroll
type RespStatus uint32

const (
// StatusOk means generate proof success
StatusOk RespStatus = iota
// StatusProofError means generate proof failed
StatusProofError
)

// ProverType represents the type of prover.
type ProverType uint8

func (r ProverType) String() string {
switch r {
case ProverTypeChunk:
return "prover type chunk"
case ProverTypeBatch:
return "prover type batch"
case ProverTypeChunkDeprecated:
return "prover type chunk (deprecated)"
case ProverTypeBatchDeprecated:
return "prover type batch (deprecated)"
case ProverTypeOpenVM:
return "prover type openvm"
default:
return fmt.Sprintf("illegal prover type: %d", r)
}
@@ -23,11 +35,11 @@ func (r ProverType) String() string {
const (
// ProverTypeUndefined is an unknown prover type
ProverTypeUndefined ProverType = iota
// ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks
ProverTypeChunk
// ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks
ProverTypeBatch
// ProverTypeOpenVM signals it's an openvm prover
// ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks, which is deprecated
ProverTypeChunkDeprecated
// ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks, which is deprecated
ProverTypeBatchDeprecated
// ProverTypeOpenVM
ProverTypeOpenVM
)

@@ -35,10 +47,33 @@ const (
func MakeProverType(proofType message.ProofType) ProverType {
switch proofType {
case message.ProofTypeChunk:
return ProverTypeChunk
return ProverTypeChunkDeprecated
case message.ProofTypeBatch, message.ProofTypeBundle:
return ProverTypeBatch
return ProverTypeBatchDeprecated
default:
return ProverTypeUndefined
}
}

// ProverProviderType represents the type of prover provider.
type ProverProviderType uint8

func (r ProverProviderType) String() string {
switch r {
case ProverProviderTypeInternal:
return "prover provider type internal"
case ProverProviderTypeExternal:
return "prover provider type external"
default:
return fmt.Sprintf("prover provider type: %d", r)
}
}

const (
// ProverProviderTypeUndefined is an unknown prover provider type
ProverProviderTypeUndefined ProverProviderType = iota
// ProverProviderTypeInternal is an internal prover provider type
ProverProviderTypeInternal
// ProverProviderTypeExternal is an external prover provider type
ProverProviderTypeExternal
)

17
coordinator/internal/utils/prover_name.go
Normal file
@@ -0,0 +1,17 @@
package utils

import "strings"

// IsExternalProverNameMatch checks if the local and remote external prover names belong to the same provider.
// It returns true if they do, otherwise false.
func IsExternalProverNameMatch(localName, remoteName string) bool {
local := strings.Split(localName, "_")
remote := strings.Split(remoteName, "_")

if len(local) < 3 || len(remote) < 3 {
return false
}

// note the name of cloud prover is in the format of "cloud_prover_{provider-name}_index"
return local[0] == remote[0] && local[1] == remote[1] && local[2] == remote[2]
}
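
Given the cloud_prover_{provider-name}_index convention noted in the comment, only the first three underscore-separated segments decide a match. A runnable copy of the function with example inputs (the provider names are made up):

package main

import (
	"fmt"
	"strings"
)

// isExternalProverNameMatch repeats the function above so the example
// runs standalone.
func isExternalProverNameMatch(localName, remoteName string) bool {
	local := strings.Split(localName, "_")
	remote := strings.Split(remoteName, "_")
	if len(local) < 3 || len(remote) < 3 {
		return false
	}
	return local[0] == remote[0] && local[1] == remote[1] && local[2] == remote[2]
}

func main() {
	// Same provider "foo": only the trailing index differs, so they match.
	fmt.Println(isExternalProverNameMatch("cloud_prover_foo_0", "cloud_prover_foo_7")) // true
	// Different providers "foo" vs "bar": no match.
	fmt.Println(isExternalProverNameMatch("cloud_prover_foo_0", "cloud_prover_bar_0")) // false
}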
@@ -20,11 +20,12 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"

"scroll-tech/database/migrate"

"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/database/migrate"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
@@ -66,7 +67,7 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}

func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, forks []string) (*cron.Collector, *http.Server) {
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
var err error
db, err = testApps.GetGormDBClient()

@@ -83,18 +84,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
ProverManager: &config.ProverManager{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{
MockMode: true,
LowVersionCircuit: &config.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "homestead",
MinProverVersion: "v4.2.0",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "bernoulli",
MinProverVersion: "v4.3.0",
ForkName: "euclidV2",
MinProverVersion: "v4.4.89",
},
},
BatchCollectionTimeSec: 10,
@@ -108,20 +101,17 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
},
}

var chainConf params.ChainConfig
for _, forkName := range forks {
switch forkName {
case "bernoulli":
chainConf.BernoulliBlock = big.NewInt(100)
case "homestead":
chainConf.HomesteadBlock = big.NewInt(0)
}
}

proofCollector := cron.NewCollector(context.Background(), db, conf, nil)

router := gin.New()
api.InitController(conf, &chainConf, db, nil)
api.InitController(conf, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, nil)
route.Route(router, conf, nil)
srv := &http.Server{
Addr: coordinatorURL,
@@ -141,7 +131,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
func setEnv(t *testing.T) {
var err error

version.Version = "v4.2.0"
version.Version = "v4.4.89"

glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
@@ -197,7 +187,7 @@ func TestApis(t *testing.T) {
func testHandshake(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -210,7 +200,7 @@ func testHandshake(t *testing.T) {
func testFailedHandshake(t *testing.T) {
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"})
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
proofCollector.Stop()
}()
@@ -228,7 +218,7 @@ func testFailedHandshake(t *testing.T) {

func testGetTaskBlocked(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -272,7 +262,7 @@ func testGetTaskBlocked(t *testing.T) {

func testOutdatedProverVersion(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -284,14 +274,12 @@ func testOutdatedProverVersion(t *testing.T) {
batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999")
assert.True(t, chunkProver.healthCheckSuccess(t))

expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, chunkProver.proverVersion)
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
assert.Equal(t, types.ErrJWTCommonErr, code)
assert.Equal(t, expectedErr, errors.New(errMsg))

expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, batchProver.proverVersion)
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.89, actual version: %s", batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
assert.Equal(t, types.ErrJWTCommonErr, code)
assert.Equal(t, expectedErr, errors.New(errMsg))
@@ -299,7 +287,7 @@ func testOutdatedProverVersion(t *testing.T) {

func testValidProof(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -382,7 +370,7 @@ func testValidProof(t *testing.T) {
func testInvalidProof(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -470,7 +458,7 @@ func testInvalidProof(t *testing.T) {
func testProofGeneratedFailed(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -571,7 +559,7 @@ func testProofGeneratedFailed(t *testing.T) {
func testTimeoutProof(t *testing.T) {
// Setup coordinator and ws server.
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"darwinV2"})
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -594,7 +582,9 @@ func testTimeoutProof(t *testing.T) {
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, batch.Hash)
assert.NoError(t, err)
encodeData, err := json.Marshal(message.ChunkProof{})
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbChunk.Hash, encodeData, types.ProvingTaskUnassigned, 1)

@@ -79,11 +79,12 @@ func (r *mockProver) challenge(t *testing.T) string {
func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []types.ProverType) (string, int, string) {
authMsg := types.LoginParameter{
Message: types.Message{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
ProverTypes: proverTypes,
VKs: []string{"mock_vk"},
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
ProverProviderType: types.ProverProviderTypeInternal,
ProverTypes: proverTypes,
VKs: []string{"mock_vk"},
},
PublicKey: r.publicKey(),
}
@@ -200,20 +201,22 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
}

func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
proofMsgStatus := message.StatusOk
proofMsgStatus := types.StatusOk
if proofStatus == generatedFailed {
proofMsgStatus = message.StatusProofError
proofMsgStatus = types.StatusProofError
}

var proof []byte
switch proverTaskSchema.TaskType {
case int(message.ProofTypeChunk):
encodeData, err := json.Marshal(message.ChunkProof{})
switch message.ProofType(proverTaskSchema.TaskType) {
case message.ProofTypeChunk:
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
encodeData, err := json.Marshal(message.BatchProof{})
case message.ProofTypeBatch:
encodeData, err := json.Marshal(message.OpenVMBatchProof{VmProof: &message.OpenVMProof{}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
@@ -222,16 +225,14 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
if proofStatus == verifiedFailed {
switch proverTaskSchema.TaskType {
case int(message.ProofTypeChunk):
chunkProof := message.ChunkProof{}
chunkProof.Proof = []byte(verifier.InvalidTestProof)
encodeData, err := json.Marshal(&chunkProof)
encodeData, err := json.Marshal(message.OpenVMChunkProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}, MetaData: struct {
ChunkInfo *message.ChunkInfo `json:"chunk_info"`
}{ChunkInfo: &message.ChunkInfo{}}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
batchProof := message.BatchProof{}
batchProof.Proof = []byte(verifier.InvalidTestProof)
encodeData, err := json.Marshal(&batchProof)
encodeData, err := json.Marshal(&message.OpenVMBatchProof{VmProof: &message.OpenVMProof{Proof: []byte(verifier.InvalidTestProof)}})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData

@@ -8,8 +8,8 @@ require (
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/pressly/goose/v3 v3.16.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
)
|
||||
|
||||
@@ -36,8 +36,8 @@ require (
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.32.0 // indirect
|
||||
golang.org/x/net v0.25.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/sys v0.29.0 // indirect
|
||||
golang.org/x/sync v0.11.0 // indirect
|
||||
golang.org/x/sys v0.30.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
|
||||
@@ -121,8 +121,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e h1:g8jtcGiHbjWYh/V7O245IDho3WfQT4CwEpBV+MhYDrg=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20250103082839-ea3ec93d8c1e/go.mod h1:Ik3OBLl7cJxPC+CFyCBYNXBPek4wpdzkWehn/y5qLM8=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
 github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
 github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
@@ -131,8 +131,8 @@ github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5g
 github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
 github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
 github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw=
@@ -161,10 +161,10 @@ golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
 golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
 golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
 golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
 	cur, err := Current(pgDB)
 	assert.NoError(t, err)
 	// total number of tables.
-	assert.Equal(t, int64(24), cur)
+	assert.Equal(t, int64(27), cur)
 }
 
 func testMigrate(t *testing.T) {
 	assert.NoError(t, Migrate(pgDB))
 	cur, err := Current(pgDB)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(24), cur)
+	assert.Equal(t, int64(27), cur)
 }
 
 func testRollback(t *testing.T) {
 	version, err := Current(pgDB)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(24), version)
+	assert.Equal(t, int64(27), version)
 
 	assert.NoError(t, Rollback(pgDB, nil))
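The expected schema version moves from 24 to 27 because this change set adds three migration files: the queue-hash columns below, 00026_add_challenge_digest.sql, and 00027_blob_upload.sql. As a hedged sketch of what Current/Migrate/Rollback presumably wrap, here are the corresponding pressly/goose/v3 calls (the dependency pinned in go.mod above); the DSN is a placeholder, and the real helpers may load migrations differently (e.g. from an embedded FS):

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // postgres driver, also pinned in go.mod
	"github.com/pressly/goose/v3"
)

func main() {
	// Placeholder DSN; the tests run against their own postgres instance.
	db, err := sql.Open("postgres", "postgres://localhost:5432/scroll_test?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	if err := goose.SetDialect("postgres"); err != nil {
		log.Fatal(err)
	}
	dir := "database/migrate/migrations" // directory holding the three new files

	if err := goose.Up(db, dir); err != nil { // what Migrate does: apply all pending migrations
		log.Fatal(err)
	}
	v, err := goose.GetDBVersion(db) // what Current returns: 27 after this change, 24 before
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("schema version:", v)

	if err := goose.Down(db, dir); err != nil { // one step back, as testRollback exercises
		log.Fatal(err)
	}
}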
@@ -0,0 +1,26 @@
+-- +goose Up
+-- +goose StatementBegin
+
+ALTER TABLE chunk
+ADD COLUMN prev_l1_message_queue_hash VARCHAR DEFAULT '',
+ADD COLUMN post_l1_message_queue_hash VARCHAR DEFAULT '';
+
+ALTER TABLE batch
+ADD COLUMN prev_l1_message_queue_hash VARCHAR DEFAULT '',
+ADD COLUMN post_l1_message_queue_hash VARCHAR DEFAULT '';
+
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+
+ALTER TABLE IF EXISTS chunk
+DROP COLUMN IF EXISTS prev_l1_message_queue_hash,
+DROP COLUMN IF EXISTS post_l1_message_queue_hash;
+
+ALTER TABLE IF EXISTS batch
+DROP COLUMN IF EXISTS prev_l1_message_queue_hash,
+DROP COLUMN IF EXISTS post_l1_message_queue_hash;
+
+-- +goose StatementEnd
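Judging by the names, these columns record the rolling L1 message queue hash before and after each chunk and batch. One way the new columns could surface in the Go ORM layer, as an illustrative mirror only; the actual field names, tags, and surrounding struct live in the repo's orm packages and are not shown in this diff:

package orm

// Chunk is an illustrative partial mirror, not the repo's definition.
type Chunk struct {
	Index                  uint64 `gorm:"column:index"` // assumed existing column
	PrevL1MessageQueueHash string `gorm:"column:prev_l1_message_queue_hash;default:''"`
	PostL1MessageQueueHash string `gorm:"column:post_l1_message_queue_hash;default:''"`
}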
database/migrate/migrations/00026_add_challenge_digest.sql (new file, 15 lines)
@@ -0,0 +1,15 @@
+-- +goose Up
+-- +goose StatementBegin
+
+ALTER TABLE batch
+ADD COLUMN challenge_digest VARCHAR DEFAULT '';
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+
+ALTER TABLE IF EXISTS batch
+DROP COLUMN IF EXISTS challenge_digest;
+
+-- +goose StatementEnd
database/migrate/migrations/00027_blob_upload.sql (new file, 32 lines)
@@ -0,0 +1,32 @@
+-- +goose Up
+-- +goose StatementBegin
+
+CREATE TABLE blob_upload (
+    batch_index BIGINT NOT NULL,
+    batch_hash VARCHAR NOT NULL,
+
+    platform SMALLINT NOT NULL,
+    status SMALLINT NOT NULL,
+
+    -- metadata
+    created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    deleted_at TIMESTAMP(0) DEFAULT NULL
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS batch_index_batch_hash_platform_uindex
+ON blob_upload(batch_index, batch_hash, platform) WHERE deleted_at IS NULL;
+
+COMMENT ON COLUMN blob_upload.status IS 'undefined, pending, uploaded, failed';
+
+CREATE INDEX IF NOT EXISTS idx_blob_upload_status_platform ON blob_upload(status, platform) WHERE deleted_at IS NULL;
+
+CREATE INDEX IF NOT EXISTS idx_blob_upload_batch_index_batch_hash_status_platform
+ON blob_upload(batch_index, batch_hash, status, platform) WHERE deleted_at IS NULL;
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+DROP TABLE blob_upload;
+-- +goose StatementEnd
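The status column's meaning is documented only by the COMMENT above. Below is a sketch of how a Go consumer (such as the blob-uploader service built in the workflow) might encode the four states and poll for pending work; the constant values, names, and helper function are assumptions, and only the column comment and the index shapes come from the migration:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

// BlobUploadStatus mirrors the SMALLINT status column; the iota ordering
// follows the column comment ('undefined, pending, uploaded, failed') and
// is an assumption about the actual encoding.
type BlobUploadStatus int16

const (
	BlobUploadStatusUndefined BlobUploadStatus = iota // 0
	BlobUploadStatusPending                           // 1
	BlobUploadStatusUploaded                          // 2
	BlobUploadStatusFailed                            // 3
)

// pendingUploads fetches batch indexes still waiting to be uploaded for one
// platform; the filter matches idx_blob_upload_status_platform above.
func pendingUploads(db *sql.DB, platform int16) ([]int64, error) {
	rows, err := db.Query(
		`SELECT batch_index FROM blob_upload
		 WHERE status = $1 AND platform = $2 AND deleted_at IS NULL`,
		BlobUploadStatusPending, platform)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var indexes []int64
	for rows.Next() {
		var idx int64
		if err := rows.Scan(&idx); err != nil {
			return nil, err
		}
		indexes = append(indexes, idx)
	}
	return indexes, rows.Err()
}

func main() {
	db, err := sql.Open("postgres", "postgres://localhost:5432/scroll?sslmode=disable") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	idxs, err := pendingUploads(db, 1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pending batches:", idxs)
}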
Some files were not shown because too many files have changed in this diff.