Compare commits


3 Commits

Author SHA1 Message Date
georgehao 99bffcc406 xx 2024-07-10 09:57:11 +08:00
georgehao b96d669f12 test 2024-07-10 09:56:12 +08:00
georgehao 222644ec2c test 2024-07-10 09:53:45 +08:00
210 changed files with 10356 additions and 27179 deletions

View File

@@ -29,7 +29,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-12-06
toolchain: nightly-2023-12-03
override: true
components: rustfmt, clippy
- name: Install Go

View File

@@ -2,8 +2,6 @@ name: Docker
on:
push:
tags:
- v**
env:
AWS_REGION: us-west-2
@@ -49,8 +47,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -94,8 +92,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -139,8 +137,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -184,8 +182,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -229,8 +227,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -274,8 +272,8 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -318,8 +316,8 @@ jobs:
file: ./build/dockerfiles/coordinator-api.Dockerfile
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -363,7 +361,7 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

View File

@@ -43,7 +43,7 @@ jobs:
github.event.pull_request.draft == false &&
(github.event.action == 'ready_for_review' || needs.skip_check.outputs.should_skip != 'true')
runs-on: ubuntu-latest
timeout-minutes: 15
timeout-minutes: 5
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@master

View File

@@ -1,28 +1,28 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update
L2GETH_TAG=scroll-v5.8.23
L2GETH_TAG=scroll-v5.3.0
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
update: ## Update dependencies
update:
go work sync
cd $(PWD)/bridge-history-api/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/coordinator/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/rollup/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
lint: ## The code's format and security checks
lint: ## The code's format and security checks.
make -C rollup lint
make -C common lint
make -C coordinator lint
make -C database lint
make -C bridge-history-api lint
fmt: ## Format the code
fmt: ## format the code
go work sync
cd $(PWD)/bridge-history-api/ && go mod tidy
cd $(PWD)/common/ && go mod tidy
@@ -31,17 +31,17 @@ fmt: ## Format the code
cd $(PWD)/rollup/ && go mod tidy
cd $(PWD)/tests/integration-test/ && go mod tidy
goimports -local scroll-tech/bridge-history-api/ -w .
goimports -local scroll-tech/common/ -w .
goimports -local scroll-tech/coordinator/ -w .
goimports -local scroll-tech/database/ -w .
goimports -local scroll-tech/rollup/ -w .
goimports -local scroll-tech/tests/integration-test/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .
goimports -local $(PWD)/coordinator/ -w .
goimports -local $(PWD)/database/ -w .
goimports -local $(PWD)/rollup/ -w .
goimports -local $(PWD)/tests/integration-test/ -w .
dev_docker: ## Build docker images for development/testing usages
dev_docker: ## build docker images for development/testing usages
docker pull postgres
docker build -t scroll_l1geth --platform linux/amd64 ./common/testcontainers/docker/l1geth/
docker build -t scroll_l2geth --platform linux/amd64 ./common/testcontainers/docker/l2geth/
docker build -t scroll_l1geth ./common/testcontainers/docker/l1geth/
docker build -t scroll_l2geth ./common/testcontainers/docker/l2geth/
clean: ## Empty out the bin folder
@rm -rf build/bin

View File

@@ -1,6 +1,7 @@
# Scroll Monorepo
[![rollup](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml)
[![contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml)
[![bridge-history](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml)
[![coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml)
[![prover](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml/badge.svg)](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml)
@@ -16,9 +17,10 @@
├── <a href="./common/">common</a>: Common libraries and types
├── <a href="./coordinator/">coordinator</a>: Prover coordinator service that dispatches proving tasks to provers
├── <a href="./database">database</a>: Database client and schema definition
├── <a href="./src">l2geth</a>: Scroll execution node
├── <a href="./prover">prover</a>: Prover client that runs proof generation for zkEVM circuit and aggregation circuit
├── <a href="./rollup">rollup</a>: Rollup-related services
├── <a href="https://github.com/scroll-tech/scroll-contracts.git">scroll-contracts</a>: solidity code for Scroll L1 bridge and rollup contracts and L2 bridge and pre-deployed contracts.
├── <a href="./rpc-gateway">rpc-gateway</a>: RPC gateway external repo
└── <a href="./tests">tests</a>: Integration tests
</pre>

View File

@@ -37,6 +37,6 @@ reset-env:
go build -o $(PWD)/build/bin/bridgehistoryapi-db-cli ./cmd/db_cli && $(PWD)/build/bin/bridgehistoryapi-db-cli reset
bridgehistoryapi-docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile --platform=linux/amd64
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile --platform=linux/amd64
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile --platform=linux/amd64
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-fetcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-api.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/bridgehistoryapi-db-cli:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile

File diff suppressed because one or more lines are too long

View File

@@ -1,13 +0,0 @@
package backendabi
import (
"testing"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
)
func TestEventSignatures(t *testing.T) {
assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), L1RevertBatchV0EventSig)
assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,uint256)")), L1RevertBatchV7EventSig)
}
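
For context on the deleted test above: an event's topic hash is the Keccak-256 of its canonical signature string, so changing one parameter type (bytes32 vs. uint256) yields a different topic. A minimal standalone sketch of that derivation, using the same crypto package the test imports:

    package main

    import (
        "fmt"

        "github.com/scroll-tech/go-ethereum/crypto"
    )

    func main() {
        // Each RevertBatch variant hashes to a distinct topic because the
        // signature string differs in its second parameter type.
        v0 := crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)"))
        v7 := crypto.Keccak256Hash([]byte("RevertBatch(uint256,uint256)"))
        fmt.Println(v0.Hex(), v7.Hex())
    }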

View File

@@ -68,10 +68,7 @@ func action(ctx *cli.Context) error {
observability.Server(ctx, db)
l1MessageFetcher, err := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
if err != nil {
log.Crit("failed to create L1MessageFetcher", "err", err)
}
l1MessageFetcher := fetcher.NewL1MessageFetcher(subCtx, cfg.L1, db, l1Client)
go l1MessageFetcher.Start()
l2MessageFetcher := fetcher.NewL2MessageFetcher(subCtx, cfg.L2, db, l2Client)

View File

@@ -19,11 +19,7 @@
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
"MessageQueueV2Addr": "0x0000000000000000000000000000000000000000",
"BatchBridgeGatewayAddr": "0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4",
"GasTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
"WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
"BlobScanAPIEndpoint": "https://api.blobscan.com/blobs/"
"BatchBridgeGatewayAddr": "0x0000000000000000000000000000000000000000"
},
"L2": {
"confirmation": 0,
@@ -43,7 +39,7 @@
"PufferGatewayAddr": "0x9eBf2f33526CD571f8b2ad312492cb650870CFd6",
"GatewayRouterAddr": "0x4C0926FF5252A435FD19e10ED15e5a249Ba19d79",
"MessageQueueAddr": "0x5300000000000000000000000000000000000000",
"BatchBridgeGatewayAddr": "0xa1a12158bE6269D7580C63eC5E609Cdc0ddD82bC"
"BatchBridgeGatewayAddr": "0x0000000000000000000000000000000000000000"
},
"db": {
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",

View File

@@ -1,8 +1,6 @@
module scroll-tech/bridge-history-api
go 1.22
toolchain go1.22.2
go 1.21
require (
github.com/gin-contrib/cors v1.5.0
@@ -10,43 +8,41 @@ require (
github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.11.0
golang.org/x/sync v0.6.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)
replace github.com/scroll-tech/go-ethereum => github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 // It's a hotfix for the header hash incompatibility issue, pls change this with caution
require (
dario.cat/mergo v1.0.0 // indirect
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/bits-and-blooms/bitset v1.12.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/bytedance/sonic v1.10.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.13.0 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
github.com/docker/docker v26.1.0+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-kit/kit v0.9.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
@@ -59,16 +55,17 @@ require (
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackc/pgx/v5 v5.5.4 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
@@ -93,28 +90,29 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect

View File

@@ -11,11 +11,9 @@ github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
@@ -25,8 +23,8 @@ github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -55,20 +53,21 @@ github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDdcjnS+4oc=
github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o=
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@@ -88,8 +87,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -110,8 +109,9 @@ github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
@@ -171,13 +171,13 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
@@ -203,8 +203,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -303,18 +303,16 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54 h1:qVpsVu1J91opTn6HYeuzWcBRVhQmPR8g05i+PlOjlI4=
github.com/scroll-tech/da-codec v0.1.3-0.20250226072559-f8a8d3898f54/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950 h1:qfOaRflvH1vtnFWloB7BveKlP/VqYgMqLJ6e9TlBJ/8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305084331-57148478e950/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
@@ -325,8 +323,6 @@ github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5g
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
@@ -343,14 +339,14 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
@@ -373,8 +369,8 @@ github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dz
github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2/go.mod h1:fjBLQ2TdQNl4bMjuWl9adoTGBypwUTPoGC+EqYqiIcU=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
@@ -387,21 +383,21 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -418,19 +414,20 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=

View File

@@ -6,7 +6,6 @@ import (
"path/filepath"
"scroll-tech/common/database"
"scroll-tech/common/utils"
)
// FetcherConfig is the configuration of Layer1 or Layer2 fetcher.
@@ -30,14 +29,7 @@ type FetcherConfig struct {
ScrollChainAddr string `json:"ScrollChainAddr"`
GatewayRouterAddr string `json:"GatewayRouterAddr"`
MessageQueueAddr string `json:"MessageQueueAddr"`
MessageQueueV2Addr string `json:"MessageQueueV2Addr"`
BatchBridgeGatewayAddr string `json:"BatchBridgeGatewayAddr"`
GasTokenGatewayAddr string `json:"GasTokenGatewayAddr"`
WrappedTokenGatewayAddr string `json:"WrappedTokenGatewayAddr"`
BeaconNodeAPIEndpoint string `json:"BeaconNodeAPIEndpoint"`
BlobScanAPIEndpoint string `json:"BlobScanAPIEndpoint"`
BlockNativeAPIEndpoint string `json:"BlockNativeAPIEndpoint"`
}
// RedisConfig redis config
@@ -72,11 +64,5 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
// Override config with environment variables
err = utils.OverrideConfigWithEnv(cfg, "SCROLL_BRIDGE_HISTORY")
if err != nil {
return nil, err
}
return cfg, nil
}
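
The block removed above applies environment-variable overrides (prefix SCROLL_BRIDGE_HISTORY) after the config file is read. A self-contained sketch of the idea; the real utils.OverrideConfigWithEnv is reflection-based over the whole struct, and the key naming below is hypothetical:

    package main

    import (
        "fmt"
        "os"
    )

    // Illustrative stand-in for the removed override step: a file-provided
    // value is replaced when a matching prefixed environment variable is
    // set. The exact variable naming scheme here is an assumption.
    func overrideString(current, prefix, key string) string {
        if v, ok := os.LookupEnv(prefix + "_" + key); ok {
            return v
        }
        return current
    }

    func main() {
        dsn := overrideString("postgres://localhost:5444/test", "SCROLL_BRIDGE_HISTORY", "DB_DSN")
        fmt.Println(dsn)
    }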

View File

@@ -2,7 +2,6 @@ package fetcher
import (
"context"
"fmt"
"math/big"
"time"
@@ -11,7 +10,6 @@ import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
"gorm.io/gorm"
"scroll-tech/bridge-history-api/internal/config"
@@ -37,32 +35,13 @@ type L1MessageFetcher struct {
}
// NewL1MessageFetcher creates a new L1MessageFetcher instance.
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) (*L1MessageFetcher, error) {
blobClient := blob_client.NewBlobClients()
if cfg.BeaconNodeAPIEndpoint != "" {
beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
if err != nil {
log.Warn("failed to create BeaconNodeClient", "err", err)
} else {
blobClient.AddBlobClient(beaconNodeClient)
}
}
if cfg.BlobScanAPIEndpoint != "" {
blobClient.AddBlobClient(blob_client.NewBlobScanClient(cfg.BlobScanAPIEndpoint))
}
if cfg.BlockNativeAPIEndpoint != "" {
blobClient.AddBlobClient(blob_client.NewBlockNativeClient(cfg.BlockNativeAPIEndpoint))
}
if blobClient.Size() == 0 {
return nil, fmt.Errorf("no blob client is configured")
}
func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1MessageFetcher {
c := &L1MessageFetcher{
ctx: ctx,
cfg: cfg,
client: client,
eventUpdateLogic: logic.NewEventUpdateLogic(db, true),
l1FetcherLogic: logic.NewL1FetcherLogic(cfg, db, client, blobClient),
l1FetcherLogic: logic.NewL1FetcherLogic(cfg, db, client),
}
reg := prometheus.DefaultRegisterer
@@ -79,7 +58,7 @@ func NewL1MessageFetcher(ctx context.Context, cfg *config.FetcherConfig, db *gor
Help: "Latest blockchain height the L1 message fetcher has synced with.",
})
return c, nil
return c
}
// Start starts the L1 message fetching process.
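
On the removed side, the constructor aggregates several blob providers and fails only when none is configured. A condensed sketch of that wiring, reusing the calls exactly as they appear in the removed lines (the function shape and BlobClient return type are assumptions, not repo code):

    package fetcher

    import (
        "fmt"

        "github.com/scroll-tech/go-ethereum/log"
        "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"

        "scroll-tech/bridge-history-api/internal/config"
    )

    // Condensed sketch of the removed blob-client setup: a misconfigured
    // beacon node only degrades with a warning, while an empty client set
    // is a hard error.
    func newBlobClient(cfg *config.FetcherConfig) (blob_client.BlobClient, error) {
        clients := blob_client.NewBlobClients()
        if cfg.BeaconNodeAPIEndpoint != "" {
            beaconNodeClient, err := blob_client.NewBeaconNodeClient(cfg.BeaconNodeAPIEndpoint)
            if err != nil {
                log.Warn("failed to create BeaconNodeClient", "err", err)
            } else {
                clients.AddBlobClient(beaconNodeClient)
            }
        }
        if cfg.BlobScanAPIEndpoint != "" {
            clients.AddBlobClient(blob_client.NewBlobScanClient(cfg.BlobScanAPIEndpoint))
        }
        if cfg.BlockNativeAPIEndpoint != "" {
            clients.AddBlobClient(blob_client.NewBlockNativeClient(cfg.BlockNativeAPIEndpoint))
        }
        if clients.Size() == 0 {
            return nil, fmt.Errorf("no blob client is configured")
        }
        return clients, nil
    }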

View File

@@ -141,7 +141,7 @@ func (c *L2MessageFetcher) fetchAndSaveEvents(confirmation uint64) {
return
}
if updateErr := c.eventUpdateLogic.UpdateL2WithdrawMessageProofs(c.ctx, c.l2SyncHeight); updateErr != nil {
if updateErr := c.eventUpdateLogic.UpdateL1BatchIndexAndStatus(c.ctx, c.l2SyncHeight); updateErr != nil {
log.Error("failed to update L1 batch index and status", "from", from, "to", to, "err", updateErr)
return
}

View File

@@ -2,7 +2,7 @@ package logic
import (
"context"
"errors"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
@@ -125,11 +125,6 @@ func (b *EventUpdateLogic) L1InsertOrUpdate(ctx context.Context, l1FetcherResult
}
func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, batchIndex, startBlock, endBlock uint64) error {
if startBlock > endBlock {
log.Warn("start block is greater than end block", "start", startBlock, "end", endBlock)
return nil
}
l2WithdrawMessages, err := b.crossMessageOrm.GetL2WithdrawalsByBlockRange(ctx, startBlock, endBlock)
if err != nil {
log.Error("failed to get L2 withdrawals by batch index", "batch index", batchIndex, "err", err)
@@ -153,7 +148,7 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
if withdrawTrie.NextMessageNonce != l2WithdrawMessages[0].MessageNonce {
log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actual next message nonce", l2WithdrawMessages[0].MessageNonce)
return errors.New("nonce mismatch")
return fmt.Errorf("nonce mismatch")
}
messageHashes := make([]common.Hash, len(l2WithdrawMessages))
@@ -178,42 +173,24 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
return nil
}
// UpdateL2WithdrawMessageProofs updates L2 withdrawal message proofs.
func (b *EventUpdateLogic) UpdateL2WithdrawMessageProofs(ctx context.Context, height uint64) error {
lastUpdatedFinalizedBlockHeight, err := b.batchEventOrm.GetLastUpdatedFinalizedBlockHeight(ctx)
// UpdateL1BatchIndexAndStatus updates L1 finalized batch index and status
func (b *EventUpdateLogic) UpdateL1BatchIndexAndStatus(ctx context.Context, height uint64) error {
finalizedBatches, err := b.batchEventOrm.GetFinalizedBatchesLEBlockHeight(ctx, height)
if err != nil {
log.Error("failed to get last updated finalized block height", "error", err)
return err
}
finalizedBatches, err := b.batchEventOrm.GetUnupdatedFinalizedBatchesLEBlockHeight(ctx, height)
if err != nil {
log.Error("failed to get unupdated finalized batches >= block height", "error", err)
log.Error("failed to get batches >= block height", "error", err)
return err
}
for _, finalizedBatch := range finalizedBatches {
log.Info("update finalized batch or bundle info of L2 withdrawals", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber)
// This method is compatible with both "finalize by batch" and "finalize by bundle" modes:
// - In "finalize by batch" mode, each batch emits a FinalizedBatch event.
// - In "finalize by bundle" mode, all batches in the bundle emit only one FinalizedBatch event, using the last batch's index and hash.
//
// The method updates two types of information in L2 withdrawal messages:
// 1. Withdraw proof generation:
// - finalize by batch: Generates proofs for each batch.
// - finalize by bundle: Generates proofs for the entire bundle at once.
// 2. Batch index updating:
// - finalize by batch: Updates the batch index for withdrawal messages in each processed batch.
// - finalize by bundle: Updates the batch index for all withdrawal messages in the bundle, using the index of the last batch in the bundle.
if updateErr := b.updateL2WithdrawMessageInfos(ctx, finalizedBatch.BatchIndex, lastUpdatedFinalizedBlockHeight+1, finalizedBatch.EndBlockNumber); updateErr != nil {
log.Error("failed to update L2 withdraw message infos", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", updateErr)
log.Info("update finalized batch info of L2 withdrawals", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber)
if updateErr := b.updateL2WithdrawMessageInfos(ctx, finalizedBatch.BatchIndex, finalizedBatch.StartBlockNumber, finalizedBatch.EndBlockNumber); updateErr != nil {
log.Error("failed to update L2 withdraw message infos", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", updateErr)
return updateErr
}
if dbErr := b.batchEventOrm.UpdateBatchEventStatus(ctx, finalizedBatch.BatchIndex); dbErr != nil {
log.Error("failed to update batch event status as updated", "index", finalizedBatch.BatchIndex, "lastUpdatedFinalizedBlockHeight", lastUpdatedFinalizedBlockHeight, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", dbErr)
log.Error("failed to update batch event status as updated", "index", finalizedBatch.BatchIndex, "start", finalizedBatch.StartBlockNumber, "end", finalizedBatch.EndBlockNumber, "error", dbErr)
return dbErr
}
lastUpdatedFinalizedBlockHeight = finalizedBatch.EndBlockNumber
b.eventUpdateLogicL1FinalizeBatchEventL2BlockUpdateHeight.Set(float64(finalizedBatch.EndBlockNumber))
}
return nil
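
Condensing the removed version above: proof generation starts at the block after the last finalized height and runs to each batch's end block, which is what lets a single FinalizedBatch event cover a whole bundle. A sketch of the core loop, with the ORM and logic calls as they appear in the removed lines (method shape assumed from the surrounding diff, logging elided):

    // Sketch of the removed bundle-aware update loop.
    func (b *EventUpdateLogic) updateProofsSketch(ctx context.Context, height uint64) error {
        last, err := b.batchEventOrm.GetLastUpdatedFinalizedBlockHeight(ctx)
        if err != nil {
            return err
        }
        batches, err := b.batchEventOrm.GetUnupdatedFinalizedBatchesLEBlockHeight(ctx, height)
        if err != nil {
            return err
        }
        for _, batch := range batches {
            // One event may finalize a bundle: the proof range starts right
            // after the previously finalized height, not at this batch's
            // own start block.
            if err := b.updateL2WithdrawMessageInfos(ctx, batch.BatchIndex, last+1, batch.EndBlockNumber); err != nil {
                return err
            }
            if err := b.batchEventOrm.UpdateBatchEventStatus(ctx, batch.BatchIndex); err != nil {
                return err
            }
            last = batch.EndBlockNumber
        }
        return nil
    }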

View File

@@ -407,7 +407,7 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
return err
}
} else {
// The transactions are sorted, thus we set the score as their index.
// The transactions are sorted, thus we set the score as their indices.
for _, tx := range txs {
txBytes, err := json.Marshal(tx)
if err != nil {
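
The comment change above concerns how sorted transactions are cached: each transaction's slice index doubles as its sorted-set score, so range reads come back in order. A hedged sketch of that pattern with go-redis v8 (the client, key, and function names here are assumptions, not the repo's code):

    package main

    import (
        "context"
        "encoding/json"

        "github.com/go-redis/redis/v8"
    )

    // cacheSorted stores already-sorted items in a Redis sorted set, using
    // each item's index as its score so a later ZRange preserves the order.
    func cacheSorted(ctx context.Context, rdb *redis.Client, key string, txs []interface{}) error {
        for i, tx := range txs {
            txBytes, err := json.Marshal(tx)
            if err != nil {
                return err
            }
            if err := rdb.ZAdd(ctx, key, &redis.Z{Score: float64(i), Member: txBytes}).Err(); err != nil {
                return err
            }
        }
        return nil
    }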

View File

@@ -2,16 +2,13 @@ package logic
import (
"context"
"fmt"
"math/big"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
backendabi "scroll-tech/bridge-history-api/abi"
"scroll-tech/bridge-history-api/internal/config"
@@ -22,17 +19,15 @@ import (
// L1EventParser the l1 event parser
type L1EventParser struct {
cfg *config.FetcherConfig
client *ethclient.Client
blobClient blob_client.BlobClient
cfg *config.FetcherConfig
client *ethclient.Client
}
// NewL1EventParser creates l1 event parser
func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client, blobClient blob_client.BlobClient) *L1EventParser {
func NewL1EventParser(cfg *config.FetcherConfig, client *ethclient.Client) *L1EventParser {
return &L1EventParser{
cfg: cfg,
client: client,
blobClient: blobClient,
cfg: cfg,
client: client,
}
}
@@ -173,14 +168,6 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts)
case backendabi.L1DepositWrappedTokenSig:
event := backendabi.WrappedTokenMessageEvent{}
if err := utils.UnpackLog(backendabi.L1WrappedTokenGatewayABI, &event, "DepositWrappedToken", vlog); err != nil {
log.Error("Failed to unpack DepositWrappedToken event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
case backendabi.L1SentMessageEventSig:
event := backendabi.L1SentMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil {
@@ -237,21 +224,7 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
}
// ParseL1BatchEventLogs parses L1 watched batch events.
func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client, blockTimestampsMap map[uint64]uint64) ([]*orm.BatchEvent, error) {
// Since multiple CommitBatch events per transaction is introduced >= CodecV7,
// with one transaction carrying multiple blobs,
// each CommitBatch event corresponds to a blob containing block range data.
// To correctly process these events, we need to:
// 1. Parsing the associated blob data to extract the block range for each event
// 2. Tracking the parent batch hash for each processed CommitBatch event, to:
// - Validate the batch hash, since parent batch hash is needed to calculate the batch hash
// - Derive the index of the current batch by the number of parent batch hashes tracked
// In commitBatches and commitAndFinalizeBatch, the parent batch hash is passed in calldata,
// so that we can use it to get the first batch's parent batch hash, and derive the rest.
// The index map serves this purpose with:
// Key: commit transaction hash
// Value: parent batch hashes (in order) for each processed CommitBatch event in the transaction
txBlobIndexMap := make(map[common.Hash][]common.Hash)
func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.Log, client *ethclient.Client) ([]*orm.BatchEvent, error) {
var l1BatchEvents []*orm.BatchEvent
for _, vlog := range logs {
switch vlog.Topics[0] {
@@ -266,59 +239,11 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
log.Error("Failed to get commit batch tx or the tx is still pending", "err", err, "isPending", isPending)
return nil, err
}
version, startBlock, endBlock, err := utils.GetBatchVersionAndBlockRangeFromCalldata(commitTx.Data())
startBlock, endBlock, err := utils.GetBatchRangeFromCalldata(commitTx.Data())
if err != nil {
log.Error("Failed to get batch range from calldata", "hash", commitTx.Hash().String(), "height", vlog.BlockNumber)
return nil, err
}
if version >= 7 { // It's a batch with version >= 7.
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(version))
if err != nil {
return nil, fmt.Errorf("unsupported codec version: %v, err: %w", version, err)
}
// we append the batch hash to the slice for the current commit transaction after processing the batch.
// that means the current index of the batch within the transaction is len(txBlobIndexMap[vlog.TxHash]).
currentIndex := len(txBlobIndexMap[vlog.TxHash])
if currentIndex >= len(commitTx.BlobHashes()) {
return nil, fmt.Errorf("commit transaction %s has %d blobs, but trying to access index %d (batch index %d)",
vlog.TxHash.String(), len(commitTx.BlobHashes()), currentIndex, event.BatchIndex.Uint64())
}
blobVersionedHash := commitTx.BlobHashes()[currentIndex]
// validate the batch hash
var parentBatchHash common.Hash
if currentIndex == 0 {
parentBatchHash, err = utils.GetParentBatchHashFromCalldata(commitTx.Data())
if err != nil {
return nil, fmt.Errorf("failed to get parent batch header from calldata, tx hash: %s, err: %w", vlog.TxHash.String(), err)
}
} else {
// here we need to subtract 1 from the current index to get the parent batch hash.
parentBatchHash = txBlobIndexMap[vlog.TxHash][currentIndex-1]
}
calculatedBatch, err := codec.NewDABatchFromParams(event.BatchIndex.Uint64(), blobVersionedHash, parentBatchHash)
if err != nil {
return nil, fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", event.BatchIndex.Uint64(), err)
}
if calculatedBatch.Hash() != event.BatchHash {
return nil, fmt.Errorf("batch hash mismatch for batch %d, expected: %s, got: %s", event.BatchIndex, event.BatchHash.String(), calculatedBatch.Hash().String())
}
blocks, err := e.getBatchBlockRangeFromBlob(ctx, codec, blobVersionedHash, blockTimestampsMap[vlog.BlockNumber])
if err != nil {
return nil, fmt.Errorf("failed to process versioned blob, blobVersionedHash: %s, block number: %d, blob index: %d, err: %w",
blobVersionedHash.String(), vlog.BlockNumber, currentIndex, err)
}
if len(blocks) == 0 {
return nil, fmt.Errorf("no blocks found in the blob, blobVersionedHash: %s, block number: %d, blob index: %d",
blobVersionedHash.String(), vlog.BlockNumber, currentIndex)
}
startBlock = blocks[0].Number()
endBlock = blocks[len(blocks)-1].Number()
txBlobIndexMap[vlog.TxHash] = append(txBlobIndexMap[vlog.TxHash], event.BatchHash)
}
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeCommitted),
BatchIndex: event.BatchIndex.Uint64(),
@@ -327,8 +252,8 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
EndBlockNumber: endBlock,
L1BlockNumber: vlog.BlockNumber,
})
case backendabi.L1RevertBatchV0EventSig:
event := backendabi.L1RevertBatchV0Event{}
case backendabi.L1RevertBatchEventSig:
event := backendabi.L1RevertBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch", vlog); err != nil {
log.Error("Failed to unpack RevertBatch event", "err", err)
return nil, err
@@ -339,19 +264,6 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
})
case backendabi.L1RevertBatchV7EventSig:
event := backendabi.L1RevertBatchV7Event{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "RevertBatch0", vlog); err != nil {
log.Error("Failed to unpack RevertBatch event", "err", err)
return nil, err
}
for i := event.StartBatchIndex.Uint64(); i <= event.FinishBatchIndex.Uint64(); i++ {
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeReverted),
BatchIndex: i,
L1BlockNumber: vlog.BlockNumber,
})
}
case backendabi.L1FinalizeBatchEventSig:
event := backendabi.L1FinalizeBatchEvent{}
if err := utils.UnpackLog(backendabi.IScrollChainABI, &event, "FinalizeBatch", vlog); err != nil {
@@ -361,7 +273,6 @@ func (e *L1EventParser) ParseL1BatchEventLogs(ctx context.Context, logs []types.
l1BatchEvents = append(l1BatchEvents, &orm.BatchEvent{
BatchStatus: int(btypes.BatchStatusTypeFinalized),
BatchIndex: event.BatchIndex.Uint64(),
BatchHash: event.BatchHash.String(),
L1BlockNumber: vlog.BlockNumber,
})
}
@@ -408,16 +319,6 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
QueueIndex: index,
})
}
case backendabi.L1ResetDequeuedTransactionEventSig:
event := backendabi.L1ResetDequeuedTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "ResetDequeuedTransaction", vlog); err != nil {
log.Error("Failed to unpack ResetDequeuedTransaction event", "err", err)
return nil, err
}
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeResetDequeuedTransaction,
QueueIndex: event.StartIndex.Uint64(),
})
case backendabi.L1DropTransactionEventSig:
event := backendabi.L1DropTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DropTransaction", vlog); err != nil {
@@ -469,27 +370,3 @@ func getRealFromAddress(ctx context.Context, eventSender common.Address, eventMe
}
return sender.String(), nil
}
func (e *L1EventParser) getBatchBlockRangeFromBlob(ctx context.Context, codec encoding.Codec, blobVersionedHash common.Hash, l1BlockTime uint64) ([]encoding.DABlock, error) {
blob, err := e.blobClient.GetBlobByVersionedHashAndBlockTime(ctx, blobVersionedHash, l1BlockTime)
if err != nil {
return nil, fmt.Errorf("failed to get blob %s: %w", blobVersionedHash.Hex(), err)
}
if blob == nil {
return nil, fmt.Errorf("blob %s not found", blobVersionedHash.Hex())
}
blobPayload, err := codec.DecodeBlob(blob)
if err != nil {
return nil, fmt.Errorf("blob %s decode error: %w", blobVersionedHash.Hex(), err)
}
blocks := blobPayload.Blocks()
if len(blocks) == 0 {
return nil, fmt.Errorf("empty blocks in blob %s", blobVersionedHash.Hex())
}
log.Debug("Successfully processed blob", "blobVersionedHash", blobVersionedHash.Hex(), "blocksCount", len(blocks))
return blocks, nil
}
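
Background on the versioned-hash lookup above: under EIP-4844, a blob's versioned hash is the SHA-256 of its KZG commitment with the first byte overwritten by the version prefix 0x01, which is why the parser can fetch and verify a blob by this hash alone. A minimal, self-contained sketch (standard library only; the zero commitment is for illustration):

package main

import (
	"crypto/sha256"
	"fmt"
)

// versionedHash derives an EIP-4844 versioned hash from a 48-byte KZG
// commitment: sha256(commitment) with byte 0 set to the version 0x01.
func versionedHash(commitment [48]byte) [32]byte {
	h := sha256.Sum256(commitment[:])
	h[0] = 0x01
	return h
}

func main() {
	var commitment [48]byte // placeholder commitment, for illustration only
	fmt.Printf("0x%x\n", versionedHash(commitment))
}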

View File

@@ -11,7 +11,6 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
"gorm.io/gorm"
backendabi "scroll-tech/bridge-history-api/abi"
@@ -50,10 +49,13 @@ type L1FetcherLogic struct {
}
// NewL1FetcherLogic creates L1 fetcher logic
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client, blobClient blob_client.BlobClient) *L1FetcherLogic {
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
addressList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -67,8 +69,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
}
gatewayList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -100,30 +105,6 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
if common.HexToAddress(cfg.ETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.ETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.ETHGatewayAddr))
}
if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
}
if common.HexToAddress(cfg.GasTokenGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.GasTokenGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.GasTokenGatewayAddr))
}
if common.HexToAddress(cfg.WrappedTokenGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
}
if common.HexToAddress(cfg.MessageQueueV2Addr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.MessageQueueV2Addr))
}
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L1FetcherLogic{
@@ -134,7 +115,7 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
client: client,
addressList: addressList,
gatewayList: gatewayList,
parser: NewL1EventParser(cfg, client, blobClient),
parser: NewL1EventParser(cfg, client),
}
reg := prometheus.DefaultRegisterer
@@ -173,10 +154,14 @@ func (f *L1FetcherLogic) getBlocksAndDetectReorg(ctx context.Context, from, to u
return false, 0, lastBlockHash, blocks, nil
}
func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) ([]*orm.CrossMessage, error) {
func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, blocks []*types.Block) (map[uint64]uint64, []*orm.CrossMessage, error) {
var l1RevertedTxs []*orm.CrossMessage
blockTimestampsMap := make(map[uint64]uint64)
for i := from; i <= to; i++ {
block := blocks[i-from]
blockTimestampsMap[block.NumberU64()] = block.Time()
for _, tx := range block.Transactions() {
// Gateways: L1 deposit.
// Messenger: L1 deposit retry (replayMessage), L1 deposit refund (dropMessage), L2 withdrawal's claim (relayMessageWithProof).
@@ -188,7 +173,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
receipt, receiptErr := f.client.TransactionReceipt(ctx, tx.Hash())
if receiptErr != nil {
log.Error("Failed to get transaction receipt", "txHash", tx.Hash().String(), "err", receiptErr)
return nil, receiptErr
return nil, nil, receiptErr
}
// Check if the transaction is failed
@@ -200,7 +185,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
sender, senderErr := signer.Sender(tx)
if senderErr != nil {
log.Error("get sender failed", "chain id", tx.ChainId().Uint64(), "tx hash", tx.Hash().String(), "err", senderErr)
return nil, senderErr
return nil, nil, senderErr
}
l1RevertedTxs = append(l1RevertedTxs, &orm.CrossMessage{
@@ -214,7 +199,7 @@ func (f *L1FetcherLogic) getRevertedTxs(ctx context.Context, from, to uint64, bl
})
}
}
return l1RevertedTxs, nil
return blockTimestampsMap, l1RevertedTxs, nil
}
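
The refactor above threads blockTimestampsMap out of getRevertedTxs so timestamps are captured while the blocks are already in memory, rather than re-fetched later. A hedged sketch of the same one-pass indexing idea (timestampsByNumber is a hypothetical helper, not part of the fetcher API):

package fetcher

import "github.com/scroll-tech/go-ethereum/core/types"

// timestampsByNumber indexes block timestamps by block number in one pass,
// so later consumers (e.g. blob lookups keyed by L1 block time) need no extra RPCs.
func timestampsByNumber(blocks []*types.Block) map[uint64]uint64 {
	m := make(map[uint64]uint64, len(blocks))
	for _, b := range blocks {
		m[b.NumberU64()] = b.Time()
	}
	return m
}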
func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]types.Log, error) {
@@ -225,7 +210,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 17)
query.Topics[0] = make([]common.Hash, 14)
query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1DepositERC721Sig
@@ -234,15 +219,12 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
query.Topics[0][5] = backendabi.L1RelayedMessageEventSig
query.Topics[0][6] = backendabi.L1FailedRelayedMessageEventSig
query.Topics[0][7] = backendabi.L1CommitBatchEventSig
query.Topics[0][8] = backendabi.L1RevertBatchV0EventSig
query.Topics[0][9] = backendabi.L1RevertBatchV7EventSig
query.Topics[0][10] = backendabi.L1FinalizeBatchEventSig
query.Topics[0][11] = backendabi.L1QueueTransactionEventSig
query.Topics[0][12] = backendabi.L1DequeueTransactionEventSig
query.Topics[0][13] = backendabi.L1DropTransactionEventSig
query.Topics[0][14] = backendabi.L1ResetDequeuedTransactionEventSig
query.Topics[0][15] = backendabi.L1BridgeBatchDepositSig
query.Topics[0][16] = backendabi.L1DepositWrappedTokenSig
query.Topics[0][8] = backendabi.L1RevertBatchEventSig
query.Topics[0][9] = backendabi.L1FinalizeBatchEventSig
query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
query.Topics[0][12] = backendabi.L1DropTransactionEventSig
query.Topics[0][13] = backendabi.L1BridgeBatchDepositSig
eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil {
@@ -266,18 +248,12 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
return isReorg, reorgHeight, blockHash, nil, nil
}
l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
blockTimestampsMap, l1RevertedTxs, err := f.getRevertedTxs(ctx, from, to, blocks)
if err != nil {
log.Error("L1Fetcher getRevertedTxs failed", "from", from, "to", to, "error", err)
return false, 0, common.Hash{}, nil, err
}
// Map block number to block timestamp to avoid fetching block header multiple times to get block timestamp.
blockTimestampsMap := make(map[uint64]uint64)
for _, block := range blocks {
blockTimestampsMap[block.NumberU64()] = block.Time()
}
eventLogs, err := f.l1FetcherLogs(ctx, from, to)
if err != nil {
log.Error("L1Fetcher l1FetcherLogs failed", "from", from, "to", to, "error", err)
@@ -290,7 +266,7 @@ func (f *L1FetcherLogic) L1Fetcher(ctx context.Context, from, to uint64, lastBlo
return false, 0, common.Hash{}, nil, err
}
l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client, blockTimestampsMap)
l1BatchEvents, err := f.parser.ParseL1BatchEventLogs(ctx, eventLogs, f.client)
if err != nil {
log.Error("failed to parse L1 batch event logs", "from", from, "to", to, "err", err)
return false, 0, common.Hash{}, nil, err
@@ -363,10 +339,6 @@ func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_skip_message").Add(1)
case btypes.MessageQueueEventTypeDropTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_drop_message").Add(1)
// one ResetDequeuedTransaction event can reset multiple skipped messages;
// this metric counts only the number of events, not the number of skipped messages.
case btypes.MessageQueueEventTypeResetDequeuedTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_reset_skipped_messages").Add(1)
}
}
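
On the log query assembled in l1FetcherLogs above: in go-ethereum's FilterQuery, every hash placed in Topics[0] is OR-ed, so a single eth_getLogs call matches any of the listed event signatures. A reduced sketch with two signatures instead of fourteen (filterTwoSigs is a hypothetical name; addresses are elided):

package fetcher

import (
	"context"
	"math/big"

	"github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

// filterTwoSigs fetches logs in [from, to] whose first topic matches either
// signature; entries within Topics[0] act as an OR filter.
func filterTwoSigs(ctx context.Context, client *ethclient.Client, from, to uint64, sigA, sigB common.Hash) ([]types.Log, error) {
	query := ethereum.FilterQuery{
		FromBlock: new(big.Int).SetUint64(from),
		ToBlock:   new(big.Int).SetUint64(to),
		Topics:    [][]common.Hash{{sigA, sigB}},
	}
	return client.FilterLogs(ctx, query)
}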

View File

@@ -54,6 +54,7 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -67,6 +68,7 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -98,11 +100,6 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
}
if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
}
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L2FetcherLogic{

View File

@@ -2,7 +2,6 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -46,7 +45,7 @@ func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64,
db = db.Model(&BatchEvent{})
db = db.Order("l1_block_number desc")
if err := db.First(&batch).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("failed to get batch synced height in db, error: %w", err)
@@ -54,26 +53,8 @@ func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64,
return batch.L1BlockNumber, nil
}
// GetLastUpdatedFinalizedBlockHeight returns the last updated finalized block height in db.
func (c *BatchEvent) GetLastUpdatedFinalizedBlockHeight(ctx context.Context) (uint64, error) {
var batch BatchEvent
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
db = db.Where("batch_status = ?", btypes.BatchStatusTypeFinalized)
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUpdated)
db = db.Order("batch_index desc")
if err := db.First(&batch).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
// No finalized batch found, return genesis batch's end block number.
return 0, nil
}
return 0, fmt.Errorf("failed to get last updated finalized block height, error: %w", err)
}
return batch.EndBlockNumber, nil
}
// GetUnupdatedFinalizedBatchesLEBlockHeight returns the finalized batches with end block <= given block height in db.
func (c *BatchEvent) GetUnupdatedFinalizedBatchesLEBlockHeight(ctx context.Context, blockHeight uint64) ([]*BatchEvent, error) {
// GetFinalizedBatchesLEBlockHeight returns the finalized batches with end block <= given block height in db.
func (c *BatchEvent) GetFinalizedBatchesLEBlockHeight(ctx context.Context, blockHeight uint64) ([]*BatchEvent, error) {
var batches []*BatchEvent
db := c.db.WithContext(ctx)
db = db.Model(&BatchEvent{})
@@ -82,16 +63,19 @@ func (c *BatchEvent) GetUnupdatedFinalizedBatchesLEBlockHeight(ctx context.Conte
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUnupdated)
db = db.Order("batch_index asc")
if err := db.Find(&batches).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get unupdated finalized batches >= block height, error: %w", err)
return nil, fmt.Errorf("failed to get batches >= block height, error: %w", err)
}
return batches, nil
}
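
A note on the errors.Is / == comparisons flipped throughout these ORM files: a plain == matches only the exact sentinel, while errors.Is also unwraps error chains. GORM's First returns gorm.ErrRecordNotFound directly, so == happens to work here, but a wrapped error would slip past it. A minimal illustration:

package main

import (
	"errors"
	"fmt"

	"gorm.io/gorm"
)

func main() {
	wrapped := fmt.Errorf("query failed: %w", gorm.ErrRecordNotFound)
	fmt.Println(wrapped == gorm.ErrRecordNotFound)          // false: == sees only the wrapper
	fmt.Println(errors.Is(wrapped, gorm.ErrRecordNotFound)) // true: errors.Is unwraps the chain
}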
// InsertOrUpdateBatchEvents inserts a new batch event or updates an existing one based on the BatchStatusType.
func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvents []*BatchEvent) error {
var maxFinalizedBatchIndex uint64
var containsFinalizedEvent bool
var maxL1BlockNumber uint64
for _, l1BatchEvent := range l1BatchEvents {
db := c.db
db = db.WithContext(ctx)
@@ -108,16 +92,17 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
return fmt.Errorf("failed to insert or ignore batch event, error: %w", err)
}
case btypes.BatchStatusTypeFinalized:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = btypes.BatchStatusTypeFinalized
updateFields["l1_block_number"] = l1BatchEvent.L1BlockNumber
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
containsFinalizedEvent = true
// track maxFinalizedBatchIndex, which signals that all batches before it are finalized
if l1BatchEvent.BatchIndex > maxFinalizedBatchIndex {
maxFinalizedBatchIndex = l1BatchEvent.BatchIndex
}
if l1BatchEvent.L1BlockNumber > maxL1BlockNumber {
maxL1BlockNumber = l1BatchEvent.L1BlockNumber
}
case btypes.BatchStatusTypeReverted:
db = db.Where("batch_index = ?", l1BatchEvent.BatchIndex)
db = db.Where("batch_status != ?", btypes.BatchStatusTypeFinalized)
db = db.Where("batch_hash = ?", l1BatchEvent.BatchHash)
updateFields["batch_status"] = btypes.BatchStatusTypeReverted
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
@@ -128,6 +113,21 @@ func (c *BatchEvent) InsertOrUpdateBatchEvents(ctx context.Context, l1BatchEvent
}
}
}
if containsFinalizedEvent {
db := c.db
db = db.WithContext(ctx)
db = db.Model(&BatchEvent{})
updateFields := make(map[string]interface{})
// After Darwin, a FinalizeBatch event signals that a range of batches is finalized,
// thus losing the per-batch hash info. However, batch_index alone is enough to update finalized batches.
db = db.Where("batch_index <= ?", maxFinalizedBatchIndex)
db = db.Where("batch_status != ?", btypes.BatchStatusTypeFinalized)
updateFields["batch_status"] = btypes.BatchStatusTypeFinalized
updateFields["l1_block_number"] = maxL1BlockNumber
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("failed to update batch event, error: %w", err)
}
}
return nil
}
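
To make the batched finalization above concrete: once any FinalizeBatch event is seen, only the maximum finalized batch index is recorded, and one range update marks every earlier non-finalized batch as finalized. A toy in-memory equivalent (hypothetical types, no database):

package main

import "fmt"

type batch struct {
	index     uint64
	finalized bool
}

// finalizeUpTo marks every batch with index <= maxIdx as finalized,
// mirroring the single range UPDATE issued after the event loop.
func finalizeUpTo(batches []batch, maxIdx uint64) {
	for i := range batches {
		if batches[i].index <= maxIdx {
			batches[i].finalized = true
		}
	}
}

func main() {
	bs := []batch{{index: 8}, {index: 9}, {index: 10}, {index: 11}}
	finalizeUpTo(bs, 10)
	fmt.Println(bs) // [{8 true} {9 true} {10 true} {11 false}]
}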

View File

@@ -2,7 +2,6 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -85,7 +84,7 @@ func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageTy
db = db.Order("l2_block_number desc")
}
if err := db.First(&message).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return 0, nil
}
return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
@@ -109,7 +108,7 @@ func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*Cro
db = db.Where("rollup_status = ?", btypes.RollupStatusTypeFinalized)
db = db.Order("message_nonce desc")
if err := db.First(&message).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
@@ -128,10 +127,10 @@ func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBl
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Order("message_nonce asc")
if err := db.Find(&messages).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return nil, nil
}
return nil, fmt.Errorf("failed to get L2 withdrawals by block range, error: %v", err)
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
}
return messages, nil
}
@@ -218,12 +217,6 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeDropped
case btypes.MessageQueueEventTypeResetDequeuedTransaction:
db = db.Where("tx_status = ?", types.TxStatusTypeSkipped)
// reset skipped messages whose nonce is greater than or equal to the queue index.
db = db.Where("message_nonce >= ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeSent
}
if err := db.Updates(txStatusUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err)
@@ -237,7 +230,7 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
db = db.Model(&CrossMessage{})
txHashUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType {
case btypes.MessageQueueEventTypeDequeueTransaction, btypes.MessageQueueEventTypeResetDequeuedTransaction:
case btypes.MessageQueueEventTypeDequeueTransaction:
continue
case btypes.MessageQueueEventTypeQueueTransaction:
// only replayMessages or enforced txs remain (their message hashes would not be found); sendMessages have been filtered out.

View File

@@ -70,7 +70,6 @@ const (
MessageQueueEventTypeQueueTransaction
MessageQueueEventTypeDequeueTransaction
MessageQueueEventTypeDropTransaction
MessageQueueEventTypeResetDequeuedTransaction
)
// BatchStatusType represents the type of batch status.

View File

@@ -38,7 +38,7 @@ func GetBlockNumber(ctx context.Context, client *ethclient.Client, confirmations
// @todo: add unit test.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID {
return errors.New("event signature mismatch")
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
@@ -66,68 +66,32 @@ func ComputeMessageHash(
return common.BytesToHash(crypto.Keccak256(data))
}
// GetBatchVersionAndBlockRangeFromCalldata finds the batch version and block range from calldata, both bounds inclusive.
func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uint64, error) {
const methodIDLength = 4
if len(txData) < methodIDLength {
return 0, 0, 0, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
}
method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
// GetBatchRangeFromCalldata finds the block range from calldata, both bounds inclusive.
func GetBatchRangeFromCalldata(calldata []byte) (uint64, uint64, error) {
method := backendabi.IScrollChainABI.Methods["commitBatch"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return 0, 0, 0, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
// special case: import genesis batch
method = backendabi.IScrollChainABI.Methods["importGenesisBatch"]
_, err2 := method.Inputs.Unpack(calldata[4:])
if err2 == nil {
// genesis batch
return 0, 0, nil
}
// neither "commitBatch" nor "importGenesisBatch" matches, give up
return 0, 0, err
}
values, err := method.Inputs.Unpack(txData[methodIDLength:])
args := commitBatchArgs{}
err = method.Inputs.Copy(&args, values)
if err != nil {
return 0, 0, 0, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
}
var chunks [][]byte
var version uint8
if method.Name == "importGenesisBatch" {
return 0, 0, 0, nil
} else if method.Name == "commitBatch" {
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
var args commitBatchArgs
if err = method.Inputs.Copy(&args, values); err != nil {
return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
}
chunks = args.Chunks
version = args.Version
} else if method.Name == "commitBatchWithBlobProof" {
type commitBatchWithBlobProofArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
BlobDataProof []byte
}
var args commitBatchWithBlobProofArgs
if err = method.Inputs.Copy(&args, values); err != nil {
return 0, 0, 0, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
}
chunks = args.Chunks
version = args.Version
} else if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
if len(values) < 3 {
return 0, 0, 0, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
}
var ok bool
version, ok = values[0].(uint8)
if !ok {
return 0, 0, 0, fmt.Errorf("invalid version type: %T", values[0])
}
return version, 0, 0, nil
return 0, 0, err
}
var startBlock uint64
@@ -136,48 +100,19 @@ func GetBatchVersionAndBlockRangeFromCalldata(txData []byte) (uint8, uint64, uin
// decode blocks from chunk and assume that there's no empty chunk
// | 1 byte | 60 bytes | ... | 60 bytes |
// | num blocks | block 1 | ... | block n |
if len(chunks) == 0 {
return 0, 0, 0, errors.New("invalid chunks")
if len(args.Chunks) == 0 {
return 0, 0, errors.New("invalid chunks")
}
chunk := chunks[0]
chunk := args.Chunks[0]
block := chunk[1:61] // first block in chunk
startBlock = binary.BigEndian.Uint64(block[0:8])
chunk = chunks[len(chunks)-1]
chunk = args.Chunks[len(args.Chunks)-1]
lastBlockIndex := int(chunk[0]) - 1
block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
finishBlock = binary.BigEndian.Uint64(block[0:8])
return version, startBlock, finishBlock, err
}
// GetParentBatchHashFromCalldata gets the parent batch hash from calldata.
// It only supports commitBatches and commitAndFinalizeBatch, which only accept batches >= v7.
func GetParentBatchHashFromCalldata(txData []byte) (common.Hash, error) {
const methodIDLength = 4
if len(txData) < methodIDLength {
return common.Hash{}, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
}
method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
if err != nil {
return common.Hash{}, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
}
values, err := method.Inputs.Unpack(txData[methodIDLength:])
if err != nil {
return common.Hash{}, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
}
if method.Name == "commitBatches" || method.Name == "commitAndFinalizeBatch" {
if len(values) < 3 {
return common.Hash{}, fmt.Errorf("insufficient arguments for %s, expected 3, got %d", method.Name, len(values))
}
parentBatchHash, ok := values[1].([32]byte)
if !ok {
return common.Hash{}, fmt.Errorf("invalid parentBatchHash type: %T", values[1])
}
return common.BytesToHash(parentBatchHash[:]), nil
}
return common.Hash{}, fmt.Errorf("method %s does not support parent batch header", method.Name)
return startBlock, finishBlock, err
}
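
A worked example of the chunk layout decoded above — one byte for the block count followed by 60-byte block contexts, with the block number in the first 8 bytes of each context, big-endian. This builds a two-block chunk and reads back the first and last block numbers:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Chunk layout: | 1 byte num blocks | 60 bytes per block context |.
	chunk := make([]byte, 1+2*60)
	chunk[0] = 2                                  // two blocks in this chunk
	binary.BigEndian.PutUint64(chunk[1:9], 100)   // block number of block 1
	binary.BigEndian.PutUint64(chunk[61:69], 101) // block number of block 2

	startBlock := binary.BigEndian.Uint64(chunk[1:9])
	lastIdx := int(chunk[0]) - 1
	off := 1 + lastIdx*60
	finishBlock := binary.BigEndian.Uint64(chunk[off : off+8])
	fmt.Println(startBlock, finishBlock) // 100 101
}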
// GetBlocksInRange gets a batch of blocks for a block range [start, end] inclusive.

File diff suppressed because one or more lines are too long

View File

@@ -91,7 +91,7 @@ linters-settings:
#local-prefixes: github.com/org/project
gocyclo:
# minimal code complexity to report, 30 by default (but we recommend 10-20)
min-complexity: 40
min-complexity: 30
maligned:
# print struct with more effective memory layout or not, false by default
suggest-new: true
@@ -254,9 +254,6 @@ issues:
- linters:
- wsl
text: "expressions should not be cuddled with declarations or returns"
- linters:
- govet
text: 'shadow: declaration of "(err|ctx)" shadows declaration at'
# Independently from option `exclude` we use default exclude patterns,
# it can be disabled by this option. To list all

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./
@@ -11,13 +11,11 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge-history-api/cmd/api && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/bridgehistoryapi-api
cd /src/bridge-history-api/cmd/api && go build -v -p 4 -o /bin/bridgehistoryapi-api
# Pull bridgehistoryapi-api into a second stage deploy ubuntu container
FROM ubuntu:20.04
# Pull bridgehistoryapi-api into a second stage deploy alpine container
FROM alpine:latest
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl -y
COPY --from=builder /bin/bridgehistoryapi-api /bin/
WORKDIR /app
ENTRYPOINT ["bridgehistoryapi-api"]
ENTRYPOINT ["bridgehistoryapi-api"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY ./bridge-history-api/go.* ./
@@ -10,11 +10,10 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge-history-api/cmd/db_cli && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli
cd /src/bridge-history-api/cmd/db_cli && go build -v -p 4 -o /bin/db_cli
# Pull db_cli into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-ldl"
# Pull db_cli into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/db_cli /bin/
WORKDIR /app
ENTRYPOINT ["db_cli"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM golang:1.21-alpine3.19 as base
WORKDIR /src
COPY go.mod* ./
@@ -11,14 +11,11 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/bridge-history-api/cmd/fetcher && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/bridgehistoryapi-fetcher
cd /src/bridge-history-api/cmd/fetcher && go build -v -p 4 -o /bin/bridgehistoryapi-fetcher
# Pull bridgehistoryapi-fetcher into a second stage deploy ubuntu container
FROM ubuntu:20.04
# Pull bridgehistoryapi-fetcher into a second stage deploy alpine container
FROM alpine:latest
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install ca-certificates vim netcat-openbsd net-tools curl -y
RUN update-ca-certificates
COPY --from=builder /bin/bridgehistoryapi-fetcher /bin/
WORKDIR /app
ENTRYPOINT ["bridgehistoryapi-fetcher"]
ENTRYPOINT ["bridgehistoryapi-fetcher"]

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as chef
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
WORKDIR app
FROM chef as planner
@@ -23,6 +23,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -35,12 +36,10 @@ RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
# Pull coordinator into a second stage deploy ubuntu container
# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
# ENV CHAIN_ID=534353
RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/coordinator_api /bin/

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -15,12 +16,10 @@ RUN go mod download -x
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/coordinator/cmd/cron/ && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/coordinator_cron
cd /src/coordinator/cmd/cron/ && go build -v -p 4 -o /bin/coordinator_cron
# Pull coordinator into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl -y
# Pull coordinator into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/coordinator_cron /bin/
WORKDIR /app
ENTRYPOINT ["coordinator_cron"]
ENTRYPOINT ["coordinator_cron"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-alpine-builder:1.21 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -16,11 +17,10 @@ FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/database/cmd && CGO_LDFLAGS="-Wl,--no-as-needed -ldl" go build -v -p 4 -o /bin/db_cli
cd /src/database/cmd && go build -v -p 4 -o /bin/db_cli
# Pull db_cli into a second stage deploy ubuntu container
FROM ubuntu:20.04
ENV CGO_LDFLAGS="-ldl"
# Pull db_cli into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/db_cli /bin/
WORKDIR /app
ENTRYPOINT ["db_cli"]

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -18,13 +19,11 @@ RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/gas_oracle/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/gas_oracle
# Pull gas_oracle into a second stage deploy ubuntu container
# Pull gas_oracle into a second stage deploy alpine container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/gas_oracle /bin/
WORKDIR /app
ENTRYPOINT ["gas_oracle"]
ENTRYPOINT ["gas_oracle"]

View File

@@ -1,23 +0,0 @@
FROM ubuntu:24.04 AS builder
RUN apt-get update -y && apt-get upgrade -y
# Install basic packages
RUN apt-get install build-essential curl wget git pkg-config -y
# Install dev-packages
RUN apt-get install libclang-dev libssl-dev llvm -y
# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo
COPY . /src
RUN cd /src/zkvm-prover && make prover
FROM ubuntu:24.04 AS runtime
COPY --from=builder /src/zkvm-prover/target/release/prover /usr/local/bin/
ENTRYPOINT ["prover"]

View File

@@ -1,5 +0,0 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*

View File

@@ -1,5 +1,5 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
@@ -7,6 +7,7 @@ COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
@@ -18,11 +19,9 @@ RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/rollup_relayer/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
# Pull rollup_relayer into a second stage deploy ubuntu container
# Pull rollup_relayer into a second stage deploy alpine container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/rollup_relayer /bin/

common/forks/forks.go Normal file (89 lines)
View File

@@ -0,0 +1,89 @@
package forks
import (
"math"
"math/big"
"sort"
"github.com/scroll-tech/go-ethereum/params"
)
// CollectSortedForkHeights returns a sorted set of block numbers at which one or more forks are activated
func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]bool, map[string]uint64) {
type nameFork struct {
name string
block *big.Int
}
forkHeightNameMap := make(map[uint64]string)
for _, fork := range []nameFork{
{name: "homestead", block: config.HomesteadBlock},
{name: "daoFork", block: config.DAOForkBlock},
{name: "eip150", block: config.EIP150Block},
{name: "eip155", block: config.EIP155Block},
{name: "eip158", block: config.EIP158Block},
{name: "byzantium", block: config.ByzantiumBlock},
{name: "constantinople", block: config.ConstantinopleBlock},
{name: "petersburg", block: config.PetersburgBlock},
{name: "istanbul", block: config.IstanbulBlock},
{name: "muirGlacier", block: config.MuirGlacierBlock},
{name: "berlin", block: config.BerlinBlock},
{name: "london", block: config.LondonBlock},
{name: "arrowGlacier", block: config.ArrowGlacierBlock},
{name: "archimedes", block: config.ArchimedesBlock},
{name: "shanghai", block: config.ShanghaiBlock},
{name: "bernoulli", block: config.BernoulliBlock},
{name: "curie", block: config.CurieBlock},
} {
if fork.block == nil {
continue
}
height := fork.block.Uint64()
// only keep the latest fork at each height, discard the rest
forkHeightNameMap[height] = fork.name
}
forkHeightsMap := make(map[uint64]bool)
forkNameHeightMap := make(map[string]uint64)
for height, name := range forkHeightNameMap {
forkHeightsMap[height] = true
forkNameHeightMap[name] = height
}
var forkHeights []uint64
for height := range forkHeightsMap {
forkHeights = append(forkHeights, height)
}
sort.Slice(forkHeights, func(i, j int) bool {
return forkHeights[i] < forkHeights[j]
})
return forkHeights, forkHeightsMap, forkNameHeightMap
}
// BlocksUntilFork returns the number of blocks until the next fork
// returns 0 if there is no fork scheduled for the future
func BlocksUntilFork(blockHeight uint64, forkHeights []uint64) uint64 {
for _, forkHeight := range forkHeights {
if forkHeight > blockHeight {
return forkHeight - blockHeight
}
}
return 0
}
// BlockRange returns the block range of the hard fork
// The caller must ensure forkHeights is sorted in ascending order
func BlockRange(currentForkHeight uint64, forkHeights []uint64) (from, to uint64) {
to = math.MaxInt64
for _, height := range forkHeights {
if currentForkHeight < height {
to = height
return
}
from = height
}
return
}

common/forks/forks_test.go Normal file (142 lines)
View File

@@ -0,0 +1,142 @@
package forks
import (
"math"
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/require"
)
func TestCollectSortedForkBlocks(t *testing.T) {
l, m, n := CollectSortedForkHeights(&params.ChainConfig{
ArchimedesBlock: big.NewInt(0),
ShanghaiBlock: big.NewInt(3),
BernoulliBlock: big.NewInt(3),
CurieBlock: big.NewInt(4),
})
require.Equal(t, l, []uint64{
0,
3,
4,
})
require.Equal(t, map[uint64]bool{
3: true,
4: true,
0: true,
}, m)
require.Equal(t, map[string]uint64{
"archimedes": 0,
"bernoulli": 3,
"curie": 4,
}, n)
}
func TestBlocksUntilFork(t *testing.T) {
tests := map[string]struct {
block uint64
forks []uint64
expected uint64
}{
"NoFork": {
block: 44,
forks: []uint64{},
expected: 0,
},
"BeforeFork": {
block: 0,
forks: []uint64{1, 5},
expected: 1,
},
"OnFork": {
block: 1,
forks: []uint64{1, 5},
expected: 4,
},
"OnLastFork": {
block: 5,
forks: []uint64{1, 5},
expected: 0,
},
"AfterFork": {
block: 5,
forks: []uint64{1, 5},
expected: 0,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
require.Equal(t, test.expected, BlocksUntilFork(test.block, test.forks))
})
}
}
func TestBlockRange(t *testing.T) {
tests := []struct {
name string
forkHeight uint64
forkHeights []uint64
expectedFrom uint64
expectedTo uint64
}{
{
name: "ToInfinite",
forkHeight: 300,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 300,
expectedTo: math.MaxInt64,
},
{
name: "To300",
forkHeight: 200,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 200,
expectedTo: 300,
},
{
name: "To200",
forkHeight: 100,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 100,
expectedTo: 200,
},
{
name: "To100",
forkHeight: 0,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 0,
expectedTo: 100,
},
{
name: "To200-1",
forkHeight: 100,
forkHeights: []uint64{100, 200},
expectedFrom: 100,
expectedTo: 200,
},
{
name: "To2",
forkHeight: 1,
forkHeights: []uint64{1, 2},
expectedFrom: 1,
expectedTo: 2,
},
{
name: "ToInfinite-1",
forkHeight: 0,
forkHeights: []uint64{0},
expectedFrom: 0,
expectedTo: math.MaxInt64,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
from, to := BlockRange(test.forkHeight, test.forkHeights)
require.Equal(t, test.expectedFrom, from)
require.Equal(t, test.expectedTo, to)
})
}
}

View File

@@ -1,12 +1,10 @@
module scroll-tech/common
go 1.22
toolchain go1.22.2
go 1.21
require (
github.com/Masterminds/semver/v3 v3.2.1
github.com/bits-and-blooms/bitset v1.20.0
github.com/bits-and-blooms/bitset v1.12.0
github.com/docker/docker v26.1.0+incompatible
github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/gin v1.9.1
@@ -15,8 +13,8 @@ require (
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/stretchr/testify v1.10.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/stretchr/testify v1.9.0
github.com/testcontainers/testcontainers-go v0.30.0
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
github.com/testcontainers/testcontainers-go/modules/postgres v0.30.0
@@ -32,7 +30,7 @@ require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.11.4 // indirect
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/aws/aws-sdk-go-v2 v1.21.2 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.45 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.43 // indirect
@@ -55,18 +53,18 @@ require (
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/cloudflare/cfssl v1.6.5 // indirect
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 // indirect
github.com/consensys/bavard v0.1.29 // indirect
github.com/consensys/gnark-crypto v0.16.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/containerd v1.7.12 // indirect
github.com/containerd/continuity v0.4.2 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/typeurl/v2 v2.1.1 // indirect
github.com/cpuguy83/dockercfg v0.3.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/distribution/reference v0.5.0 // indirect
github.com/docker/buildx v0.12.0-rc2.0.20231219140829-617f538cb315 // indirect
github.com/docker/cli v25.0.4-0.20240305161310-2bf4225ad269+incompatible // indirect
@@ -79,7 +77,7 @@ require (
github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsevents v0.1.1 // indirect
@@ -119,9 +117,9 @@ require (
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/in-toto/in-toto-golang v0.5.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -136,12 +134,12 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
@@ -184,8 +182,7 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
github.com/shibumi/go-pathspec v1.3.0 // indirect
@@ -193,17 +190,16 @@ require (
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/cobra v1.8.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.4.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect
@@ -214,7 +210,7 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
@@ -231,21 +227,19 @@ require (
go.opentelemetry.io/otel/sdk/metric v1.19.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/term v0.28.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/term v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect

View File

@@ -27,11 +27,9 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
@@ -72,8 +70,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
@@ -121,10 +119,10 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60 h1:NlkpaLBPFr05mNJWVMH7PP4L30gFG6k4z1QpypLUSh8=
github.com/compose-spec/compose-go/v2 v2.0.0-rc.8.0.20240228111658-a0507e98fe60/go.mod h1:bEPizBkIojlQ20pi2vNluBa58tevvj0Y18oUSHPyfdc=
github.com/consensys/bavard v0.1.29 h1:fobxIYksIQ+ZSrTJUuQgu+HIJwclrAPcdXqd7H2hh1k=
github.com/consensys/bavard v0.1.29/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
@@ -153,10 +151,10 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -165,8 +163,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X6GqbFowYdYdI0L9bwxL07jyPZIdepyZ0=
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@@ -214,8 +212,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
@@ -381,13 +379,13 @@ github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
@@ -436,8 +434,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -455,8 +453,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
@@ -471,9 +469,8 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -630,18 +627,16 @@ github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRr
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=
@@ -663,8 +658,6 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spdx/tools-golang v0.5.1 h1:fJg3SVOGG+eIva9ZUBm/hvyA7PIPVFjRxUKe6fdAgwE=
github.com/spdx/tools-golang v0.5.1/go.mod h1:/DRDQuBfB37HctM29YtrX1v+bXiVmT2OpQDalRmX9aU=
@@ -674,8 +667,8 @@ github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IEx
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@@ -705,10 +698,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
@@ -721,12 +714,10 @@ github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4D
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375/go.mod h1:xRroudyp5iVtxKqZCrA6n2TLFRBf8bmnjr1UD4x+z7g=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 h1:ZT8ibgassurSISJ1Pj26NsM3vY2jxFZn63Nd/TpHmRw=
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302/go.mod h1:9kMVqMyQ/Sx2df5LtnGG+nbrmiZzCS7V6gjW3oGHsvI=
@@ -763,9 +754,8 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300 h1:DZH5n7L3L8RxKdSyJHZt7WePgwdhHnPhQFdQSJaHF+o=
github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300/go.mod h1:mOd4yUMgn2fe2nV9KXsa9AyQBFZGzygVPovsZR+Rl5w=
github.com/zmap/zlint/v3 v3.5.0 h1:Eh2B5t6VKgVH0DFmTwOqE50POvyDhUaU9T2mJOe1vfQ=
@@ -806,15 +796,11 @@ go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
@@ -829,8 +815,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
@@ -840,8 +826,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -860,8 +846,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
@@ -873,8 +859,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -914,23 +900,22 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -945,8 +930,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -6,8 +6,6 @@ export RUST_BACKTRACE=full
export RUST_LOG=debug
export RUST_MIN_STACK=100000000
export PROVER_OUTPUT_DIR=test_zkp_test
export SCROLL_PROVER_ASSETS_DIR=/assets/test_assets
export DARWIN_V2_TEST_DIR=/assets
#export LD_LIBRARY_PATH=/:/usr/local/cuda/lib64
mkdir -p $PROVER_OUTPUT_DIR
@@ -15,16 +13,32 @@ mkdir -p $PROVER_OUTPUT_DIR
REPO=$(realpath ../..)
function build_test_bins() {
cd impl
cargo build --release
ln -f -s $(realpath target/release/libzkp.so) $REPO/prover/core/lib
ln -f -s $(realpath target/release/libzkp.so) $REPO/coordinator/internal/logic/verifier/lib
cd $REPO/prover
make tests_binary
go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
cd $REPO/coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd $REPO/common/libzkp
}
function build_test_bins_old() {
cd $REPO
cd prover
make libzkp
go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
cd ..
cd coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd ..
cd common/libzkp
}
build_test_bins
rm -rf $PROVER_OUTPUT_DIR/*
#rm -rf test_zkp_test/*
#rm -rf prover.log verifier.log
$REPO/prover/prover.test --exact zk_circuits_handler::darwin_v2::tests::test_circuits 2>&1 | tee prover.log
#$REPO/prover/core.test -test.v 2>&1 | tee prover.log
$REPO/coordinator/verifier.test -test.v 2>&1 | tee verifier.log

File diff suppressed because it is too large

View File

@@ -8,14 +8,24 @@ edition = "2021"
crate-type = ["cdylib"]
[patch.crates-io]
# patched to add rkyv support & MSRV 1.77
alloy-primitives = { git = "https://github.com/scroll-tech/alloy-core", branch = "v0.8.21" }
ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
gobuild = { git = "https://github.com/scroll-tech/gobuild.git" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
#ethers-etherscan = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
#ethers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
[dependencies]
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.1.0-rc.6", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.1.0-rc.6", package = "scroll-zkvm-verifier" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.4", default-features = false, features = ["parallel_syn", "scroll"] }
base64 = "0.13.0"
env_logger = "0.9.0"
@@ -25,7 +35,6 @@ once_cell = "1.19"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
anyhow = "1.0.86"
[profile.test]
opt-level = 3

View File

@@ -1 +1 @@
nightly-2024-12-06
nightly-2023-12-03

View File

@@ -0,0 +1,198 @@
use crate::{
types::{CheckChunkProofsResponse, ProofResult},
utils::{
c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char,
OUTPUT_DIR,
},
};
use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
check_chunk_hashes,
consts::AGG_VK_FILENAME,
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, BatchProvingTask, BlockTrace, ChunkInfo, ChunkProof,
};
use snark_verifier_sdk::verify_evm_calldata;
use std::{cell::OnceCell, env, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_batch_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
// The VK file must exist here; in the prover library it is optional and only logged as a warning.
if !file_exists(assets_dir, &AGG_VK_FILENAME) {
panic!("{} must exist in folder {}", *AGG_VK_FILENAME, assets_dir);
}
let prover = Prover::from_dirs(params_dir, assets_dir);
PROVER.set(prover).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_batch_verify");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *const c_char {
let check_result: Result<bool, String> = panic_catch(|| {
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
if chunk_proofs.is_empty() {
return Err("provided chunk proofs are empty.".to_string());
}
let prover_ref = PROVER.get().expect("failed to get reference to PROVER.");
let valid = prover_ref.check_protocol_of_chunks(&chunk_proofs);
Ok(valid)
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match check_result {
Ok(valid) => CheckChunkProofsResponse {
ok: valid,
error: None,
},
Err(err) => CheckChunkProofsResponse {
ok: false,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_batch_proof(
chunk_hashes: *const c_char,
chunk_proofs: *const c_char,
) -> *const c_char {
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
let chunk_hashes = c_char_to_vec(chunk_hashes);
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_hashes = serde_json::from_slice::<Vec<ChunkInfo>>(&chunk_hashes)
.map_err(|e| format!("failed to deserialize chunk hashes: {e:?}"))?;
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
if chunk_hashes.len() != chunk_proofs.len() {
return Err(format!("chunk hashes and chunk proofs lengths mismatch: chunk_hashes.len() = {}, chunk_proofs.len() = {}",
chunk_hashes.len(), chunk_proofs.len()));
}
let chunk_hashes_proofs: Vec<(_,_)> = chunk_hashes
.into_iter()
.zip(chunk_proofs.clone())
.collect();
check_chunk_hashes("", &chunk_hashes_proofs).map_err(|e| format!("failed to check chunk info: {e:?}"))?;
let batch = BatchProvingTask {
chunk_proofs
};
let proof = PROVER
.get_mut()
.expect("failed to get mutable reference to PROVER.")
.gen_agg_evm_proof(batch, None, OUTPUT_DIR.as_deref())
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match proof_result {
Ok(proof_bytes) => ProofResult {
message: Some(proof_bytes),
error: None,
},
Err(err) => ProofResult {
message: None,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
let fork_name_str = c_char_to_str(fork_name);
let fork_id = match fork_name_str {
"bernoulli" => 2,
"curie" => 3,
_ => {
log::warn!("unexpected fork_name {fork_name_str}, treated as curie");
3
}
};
let verified = panic_catch(|| {
if fork_id == 2 {
// before upgrade#3(DA Compression)
verify_evm_calldata(
include_bytes!("plonk_verifier_0.10.3.bin").to_vec(),
proof.calldata(),
)
} else {
VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
}
});
verified.unwrap_or(false) as c_char
}
// This function is only used for debugging on the Go side.
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn block_traces_to_chunk_info(block_traces: *const c_char) -> *const c_char {
let block_traces = c_char_to_vec(block_traces);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces).unwrap();
let witness_block = chunk_trace_to_witness_block(block_traces).unwrap();
let chunk_info = ChunkInfo::from_witness_block(&witness_block, false);
let chunk_info_bytes = serde_json::to_vec(&chunk_info).unwrap();
vec_to_c_char(chunk_info_bytes)
}
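For orientation, a minimal cgo sketch of how the Go side of the coordinator might drive gen_batch_proof. The package name, link flags, and header include are assumptions; the call convention itself — JSON arrays in, a JSON ProofResult envelope out, released by the caller via free_c_chars — follows from the code above.
package libzkp

/*
#cgo LDFLAGS: -lzkp -lm -ldl
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"

import (
	"errors"
	"unsafe"
)

// GenBatchProof passes JSON-encoded chunk hashes and chunk proofs across the
// FFI boundary and returns the raw ProofResult JSON envelope.
func GenBatchProof(chunkHashesJSON, chunkProofsJSON []byte) (string, error) {
	cHashes := C.CString(string(chunkHashesJSON))
	defer C.free(unsafe.Pointer(cHashes))
	cProofs := C.CString(string(chunkProofsJSON))
	defer C.free(unsafe.Pointer(cProofs))

	out := C.gen_batch_proof(cHashes, cProofs)
	if out == nil {
		// The Rust side returns null only if it could not even serialize an envelope.
		return "", errors.New("gen_batch_proof returned null")
	}
	defer C.free_c_chars(out)
	return C.GoString(out), nil
}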

View File

@@ -0,0 +1,108 @@
use crate::{
types::ProofResult,
utils::{
c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char,
OUTPUT_DIR,
},
};
use libc::c_char;
use prover::{
consts::CHUNK_VK_FILENAME,
utils::init_env_and_log,
zkevm::{Prover, Verifier},
BlockTrace, ChunkProof, ChunkProvingTask,
};
use std::{cell::OnceCell, env, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_chunk_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
// The VK file must exist here; in the prover library it is optional and only logged as a warning.
if !file_exists(assets_dir, &CHUNK_VK_FILENAME) {
panic!("{} must exist in folder {}", *CHUNK_VK_FILENAME, assets_dir);
}
let prover = Prover::from_dirs(params_dir, assets_dir);
PROVER.set(prover).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_chunk_verify");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
let block_traces = c_char_to_vec(block_traces);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces)
.map_err(|e| format!("failed to deserialize block traces: {e:?}"))?;
let chunk = ChunkProvingTask::from(block_traces);
let proof = PROVER
.get_mut()
.expect("failed to get mutable reference to PROVER.")
.gen_chunk_proof(chunk, None, None, OUTPUT_DIR.as_deref())
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match proof_result {
Ok(proof_bytes) => ProofResult {
message: Some(proof_bytes),
error: None,
},
Err(err) => ProofResult {
message: None,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(proof: *const c_char) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_chunk_proof(proof));
verified.unwrap_or(false) as c_char
}

View File

@@ -1,82 +1,4 @@
mod batch;
mod chunk;
mod types;
mod utils;
mod verifier;
use std::path::Path;
use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use verifier::{TaskType, VerifierConfig};
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init(config: *const c_char) {
let config_str = c_char_to_str(config);
let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
verifier::init(verifier_config);
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Chunk)
}
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let fork_name_str = c_char_to_str(fork_name);
// Skip verification for darwinV2 as we can't host darwinV2 and euclid verifiers on the same
// binary.
if fork_name_str == "darwinV2" {
return true as c_char;
}
let proof = c_char_to_vec(proof);
let verifier = verifier::get_verifier(fork_name_str);
if let Err(e) = verifier {
log::warn!("failed to get verifier, error: {:#}", e);
return 0 as c_char;
}
match verifier.unwrap().verify(task_type, proof) {
Err(e) => {
log::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Batch)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Bundle)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn dump_vk(fork_name: *const c_char, file: *const c_char) {
_dump_vk(fork_name, file);
}
fn _dump_vk(fork_name: *const c_char, file: *const c_char) {
let fork_name_str = c_char_to_str(fork_name);
let verifier = verifier::get_verifier(fork_name_str);
if let Ok(verifier) = verifier {
verifier.as_ref().dump_vk(Path::new(c_char_to_str(file)));
}
}

Binary file not shown.

View File

@@ -0,0 +1,22 @@
use serde::{Deserialize, Serialize};
// Represents the result of a chunk proof checking operation.
// `ok` indicates whether the proof checking was successful.
// `error` provides additional details in case the check failed.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CheckChunkProofsResponse {
pub ok: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
}
// Encapsulates the result from generating a proof.
// `message` holds the generated proof in byte slice format.
// `error` provides additional details in case the proof generation failed.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ProofResult {
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<Vec<u8>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
}
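A minimal Go-side sketch of the same two envelopes, assuming a hypothetical Go package decodes them with encoding/json; only the JSON tags come from the serde attributes above. One subtlety: serde_json encodes a Vec<u8> as a JSON array of integers, so the Go message field is declared as []int rather than []byte (which encoding/json would expect as a base64 string).
package message

// CheckChunkProofsResponse mirrors the Rust struct of the same name.
type CheckChunkProofsResponse struct {
	OK    bool   `json:"ok"`
	Error string `json:"error,omitempty"`
}

// ProofResult mirrors the Rust ProofResult. Message is []int because
// serde_json serializes Vec<u8> as an array of integers.
type ProofResult struct {
	Message []int  `json:"message,omitempty"`
	Error   string `json:"error,omitempty"`
}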

View File

@@ -1,9 +1,29 @@
use once_cell::sync::Lazy;
use std::{
ffi::CStr,
env,
ffi::{CStr, CString},
os::raw::c_char,
panic::{catch_unwind, AssertUnwindSafe},
path::PathBuf,
};
// Only used for debugging.
pub(crate) static OUTPUT_DIR: Lazy<Option<String>> =
Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
/// # Safety
#[no_mangle]
pub extern "C" fn free_c_chars(ptr: *mut c_char) {
if ptr.is_null() {
log::warn!("Try to free an empty pointer!");
return;
}
unsafe {
let _ = CString::from_raw(ptr);
}
}
pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_str().unwrap()
@@ -14,6 +34,21 @@ pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
cstr.to_bytes().to_vec()
}
pub(crate) fn string_to_c_char(string: String) -> *const c_char {
CString::new(string).unwrap().into_raw()
}
pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *const c_char {
CString::new(bytes).unwrap().into_raw()
}
pub(crate) fn file_exists(dir: &str, filename: &str) -> bool {
let mut path = PathBuf::from(dir);
path.push(filename);
path.exists()
}
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
if let Some(s) = err.downcast_ref::<String>() {

View File

@@ -1,76 +0,0 @@
#![allow(static_mut_refs)]
mod euclid;
use anyhow::{bail, Result};
use euclid::EuclidVerifier;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TaskType {
Chunk,
Batch,
Bundle,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VKDump {
pub chunk_vk: String,
pub batch_vk: String,
pub bundle_vk: String,
}
pub trait ProofVerifier {
fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
fn dump_vk(&self, file: &Path);
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
pub fork_name: String,
pub params_path: String,
pub assets_path: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VerifierConfig {
pub low_version_circuit: CircuitConfig,
pub high_version_circuit: CircuitConfig,
}
type HardForkName = String;
struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
pub fn init(config: VerifierConfig) {
let verifier = EuclidVerifier::new(&config.high_version_circuit.assets_path);
unsafe {
VERIFIER_LOW
.set(VerifierPair(
config.high_version_circuit.fork_name,
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
}
pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
unsafe {
if let Some(verifier) = VERIFIER_LOW.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
if let Some(verifier) = VERIFIER_HIGH.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
}
bail!("failed to get verifier, key not found, {}", fork_name)
}
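For reference, an illustrative init payload matching the VerifierConfig and CircuitConfig shapes above, written here as a Go raw-string constant; the paths are placeholders, and the fork names reuse ones that appear elsewhere in this diff. Note that init above only constructs an EuclidVerifier from the high_version_circuit fields.
// verifierConfigJSON is an illustrative config for the init() entry point;
// the paths are placeholders, only the field names come from the structs above.
const verifierConfigJSON = `{
  "low_version_circuit": {
    "fork_name": "darwinV2",
    "params_path": "./params",
    "assets_path": "./assets/low"
  },
  "high_version_circuit": {
    "fork_name": "euclid",
    "params_path": "./params",
    "assets_path": "./assets/high"
  }
}`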

View File

@@ -1,65 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};
use anyhow::Result;
use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifier, ChunkVerifier};
use std::{fs::File, path::Path};
pub struct EuclidVerifier {
chunk_verifier: ChunkVerifier,
batch_verifier: BatchVerifier,
bundle_verifier: BundleVerifier,
}
impl EuclidVerifier {
pub fn new(assets_dir: &str) -> Self {
let verifier_bin = Path::new(assets_dir).join("verifier.bin");
let config = Path::new(assets_dir).join("root-verifier-vm-config");
let exe = Path::new(assets_dir).join("root-verifier-committed-exe");
Self {
chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up chunk verifier"),
batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up batch verifier"),
bundle_verifier: BundleVerifier::setup(&config, &exe, &verifier_bin)
.expect("Setting up bundle verifier"),
}
}
}
impl ProofVerifier for EuclidVerifier {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.chunk_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.batch_verifier
.verify_proof(proof.proof.as_root_proof().unwrap())
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.bundle_verifier
.verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
}
})
.map_err(|err_str: String| anyhow::anyhow!(err_str))
}
fn dump_vk(&self, file: &Path) {
let f = File::create(file).expect("Failed to open file to dump VK");
let dump = VKDump {
chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
};
serde_json::to_writer(f, &dump).expect("Failed to dump VK");
}
}

View File

@@ -1,12 +1,15 @@
// BatchVerifier is used to:
// - Verify a batch proof
// - Verify a bundle proof
void init(char* config);
void init_batch_prover(char* params_dir, char* assets_dir);
void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char* check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
char* get_chunk_vk();
char* gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);
char verify_chunk_proof(char* proof, char* fork_name);
void dump_vk(char* fork_name, char* file);
char* block_traces_to_chunk_info(char* block_traces);
void free_c_chars(char* ptr);
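As a usage sketch, the char-as-boolean return convention above maps onto a thin cgo wrapper on the Go side; the package name and link flags are assumptions, only the function signatures come from this header.
package libzkp

/*
#cgo LDFLAGS: -lzkp -lm -ldl
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"

import "unsafe"

// InitAndVerifyChunk initializes the verifiers from a JSON config, then checks
// one chunk proof, interpreting the returned char as a boolean.
func InitAndVerifyChunk(configJSON string, proofJSON []byte, forkName string) bool {
	cConfig := C.CString(configJSON)
	defer C.free(unsafe.Pointer(cConfig))
	C.init(cConfig)

	cProof := C.CString(string(proofJSON))
	defer C.free(unsafe.Pointer(cProof))
	cFork := C.CString(forkName)
	defer C.free(unsafe.Pointer(cFork))
	return C.verify_chunk_proof(cProof, cFork) != 0
}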

View File

@@ -2,7 +2,6 @@ package testcontainers
import (
"context"
"errors"
"fmt"
"log"
"os"
@@ -21,10 +20,9 @@ import (
// TestcontainerApps testcontainers struct
type TestcontainerApps struct {
postgresContainer *postgres.PostgresContainer
l2GethContainer *testcontainers.DockerContainer
poSL1Container compose.ComposeStack
web3SignerContainer *testcontainers.DockerContainer
postgresContainer *postgres.PostgresContainer
l2GethContainer *testcontainers.DockerContainer
poSL1Container compose.ComposeStack
// common time stamp in nanoseconds.
Timestamp int
@@ -113,51 +111,10 @@ func (t *TestcontainerApps) StartPoSL1Container() error {
return nil
}
func (t *TestcontainerApps) StartWeb3SignerContainer(chainId int) error {
if t.web3SignerContainer != nil && t.web3SignerContainer.IsRunning() {
return nil
}
var (
err error
rootDir string
)
if rootDir, err = findProjectRootDir(); err != nil {
return fmt.Errorf("failed to find project root directory: %v", err)
}
// web3signerconf/keyconf.yaml may contain multiple configured keys; web3signer then chooses the one matching the tx's from field
web3SignerConfDir := filepath.Join(rootDir, "common", "testcontainers", "web3signerconf")
req := testcontainers.ContainerRequest{
Image: "consensys/web3signer:develop",
ExposedPorts: []string{"9000/tcp"},
Cmd: []string{"--key-config-path", "/web3signerconf/", "eth1", "--chain-id", fmt.Sprintf("%d", chainId)},
Files: []testcontainers.ContainerFile{
{
HostFilePath: web3SignerConfDir,
ContainerFilePath: "/",
FileMode: 0o777,
},
},
WaitingFor: wait.ForLog("ready to handle signing requests"),
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
}
container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
if err != nil {
log.Printf("failed to start web3signer container: %s", err)
return err
}
t.web3SignerContainer, _ = container.(*testcontainers.DockerContainer)
return nil
}
// GetPoSL1EndPoint returns the endpoint of the running PoS L1 endpoint
func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
if t.poSL1Container == nil {
return "", errors.New("PoS L1 container is not running")
return "", fmt.Errorf("PoS L1 container is not running")
}
container, err := t.poSL1Container.ServiceContainer(context.Background(), "geth")
if err != nil {
@@ -178,7 +135,7 @@ func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
// GetDBEndPoint returns the endpoint of the running postgres container
func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
if t.postgresContainer == nil || !t.postgresContainer.IsRunning() {
return "", errors.New("postgres is not running")
return "", fmt.Errorf("postgres is not running")
}
return t.postgresContainer.ConnectionString(context.Background(), "sslmode=disable")
}
@@ -186,7 +143,7 @@ func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
// GetL2GethEndPoint returns the endpoint of the running L2Geth container
func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
if t.l2GethContainer == nil || !t.l2GethContainer.IsRunning() {
return "", errors.New("l2 geth is not running")
return "", fmt.Errorf("l2 geth is not running")
}
endpoint, err := t.l2GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
if err != nil {
@@ -195,14 +152,6 @@ func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
return endpoint, nil
}
// GetWeb3SignerEndpoint returns the endpoint of the running web3signer container
func (t *TestcontainerApps) GetWeb3SignerEndpoint() (string, error) {
if t.web3SignerContainer == nil || !t.web3SignerContainer.IsRunning() {
return "", errors.New("web3signer is not running")
}
return t.web3SignerContainer.PortEndpoint(context.Background(), "9000/tcp", "http")
}
// GetGormDBClient returns a gorm.DB by connecting to the running postgres container
func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
endpoint, err := t.GetDBEndPoint()
@@ -251,11 +200,6 @@ func (t *TestcontainerApps) Free() {
t.poSL1Container = nil
}
}
if t.web3SignerContainer != nil && t.web3SignerContainer.IsRunning() {
if err := t.web3SignerContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop web3signer container: %s", err)
}
}
}
// findProjectRootDir find project root directory
@@ -273,7 +217,7 @@ func findProjectRootDir() (string, error) {
parentDir := filepath.Dir(currentDir)
if parentDir == currentDir {
return "", errors.New("go.work file not found in any parent directory")
return "", fmt.Errorf("go.work file not found in any parent directory")
}
currentDir = parentDir

View File

@@ -44,11 +44,6 @@ func TestNewTestcontainerApps(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NoError(t, testApps.StartWeb3SignerContainer(1))
endpoint, err = testApps.GetWeb3SignerEndpoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
// test free testcontainers
testApps.Free()
endpoint, err = testApps.GetDBEndPoint()
@@ -62,8 +57,4 @@ func TestNewTestcontainerApps(t *testing.T) {
endpoint, err = testApps.GetPoSL1EndPoint()
assert.EqualError(t, err, "PoS L1 container is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetWeb3SignerEndpoint()
assert.EqualError(t, err, "web3signer is not running")
assert.Empty(t, endpoint)
}

View File

@@ -1,7 +0,0 @@
type: "file-raw"
keyType: "SECP256K1"
privateKey: "0x1313131313131313131313131313131313131313131313131313131313131313"
---
type: "file-raw"
keyType: "SECP256K1"
privateKey: "0x1212121212121212121212121212121212121212121212121212121212121212"

View File

@@ -109,10 +109,6 @@ const (
ProverTaskFailureTypeVerifiedFailed
// ProverTaskFailureTypeServerError indicates that a server-side error occurred
ProverTaskFailureTypeServerError
// ProverTaskFailureTypeObjectAlreadyVerified indicates the object (batch/chunk) is already verified; may occur in test env when ENABLE_TEST_ENV_BYPASS_FEATURES is true
ProverTaskFailureTypeObjectAlreadyVerified
// ProverTaskFailureTypeReassignedByAdmin indicates the task was reassigned by an admin; this value is used in the admin system and defined here for clarity
ProverTaskFailureTypeReassignedByAdmin
)
func (r ProverTaskFailureType) String() string {
@@ -127,10 +123,6 @@ func (r ProverTaskFailureType) String() string {
return "prover task failure verified failed"
case ProverTaskFailureTypeServerError:
return "prover task failure server exception"
case ProverTaskFailureTypeObjectAlreadyVerified:
return "prover task failure object already verified"
case ProverTaskFailureTypeReassignedByAdmin:
return "prover task failure reassigned by admin"
default:
return fmt.Sprintf("illegal prover task failure type (%d)", int32(r))
}
@@ -196,31 +188,6 @@ func (s ChunkProofsStatus) String() string {
}
}
// BatchProofsStatus describes the proving status of batches that belong to a bundle.
type BatchProofsStatus int
const (
// BatchProofsStatusUndefined represents an undefined batch proofs status
BatchProofsStatusUndefined BatchProofsStatus = iota
// BatchProofsStatusPending means that some batches that belong to this bundle have not been proven
BatchProofsStatusPending
// BatchProofsStatusReady means that all batches that belong to this bundle have been proven
BatchProofsStatusReady
)
func (s BatchProofsStatus) String() string {
switch s {
case BatchProofsStatusPending:
return "BatchProofsStatusPending"
case BatchProofsStatusReady:
return "BatchProofsStatusReady"
default:
return fmt.Sprintf("Undefined BatchProofsStatus (%d)", int32(s))
}
}
// RollupStatus block_batch rollup_status (pending, committing, committed, commit_failed, finalizing, finalized, finalize_skipped, finalize_failed)
type RollupStatus int

View File

@@ -0,0 +1,91 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
// HardForkName the hard fork name
HardForkName string `json:"hard_fork_name"`
}
// SignWithKey signs the auth message with the given private key.
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey returns the public key recovered from the signature.
func (a *AuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
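A hedged round-trip sketch of how this type is exercised, written as a test in the same package; the identity values are placeholders, while SignWithKey, Verify, and PublicKey are exactly the methods defined above.
package message

import (
	"testing"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func TestAuthMsgRoundTrip(t *testing.T) {
	privKey, err := crypto.GenerateKey()
	if err != nil {
		t.Fatal(err)
	}
	authMsg := &AuthMsg{
		Identity: &Identity{
			ProverName:    "example-prover",    // placeholder
			ProverVersion: "v0.0.0",            // placeholder
			Challenge:     "example-challenge", // placeholder, normally issued by the coordinator
			HardForkName:  "euclid",
		},
	}
	// SignWithKey RLP-encodes and keccak-hashes the identity, then signs the hash.
	if err := authMsg.SignWithKey(privKey); err != nil {
		t.Fatal(err)
	}
	// Verify recovers the public key from the signature and checks it against the hash.
	if ok, err := authMsg.Verify(); err != nil || !ok {
		t.Fatalf("verification failed: ok=%v err=%v", ok, err)
	}
	// PublicKey recovers the signer's compressed public key as hex.
	if _, err := authMsg.PublicKey(); err != nil {
		t.Fatal(err)
	}
}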

View File

@@ -0,0 +1,89 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type LegacyAuthMsg struct {
// Message fields
Identity *LegacyIdentity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// LegacyIdentity contains all the fields to be signed by the prover.
type LegacyIdentity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
// SignWithKey signs the auth message with the given private key.
func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the message of auth.
func (a *LegacyAuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey returns the public key recovered from the signature.
func (a *LegacyAuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *LegacyIdentity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}

View File

@@ -1,18 +1,41 @@
package message
import (
"encoding/json"
"crypto/ecdsa"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// ProofFailureType the proof failure type
type ProofFailureType int
const (
euclidFork = "euclid"
// ProofFailureUndefined the undefined type proof failure type
ProofFailureUndefined ProofFailureType = iota
// ProofFailurePanic proof failure for prover panic
ProofFailurePanic
// ProofFailureNoPanic proof failure for no prover panic
ProofFailureNoPanic
)
// ProofType represents the type of task.
// RespStatus represents status code from prover to scroll
type RespStatus uint32
const (
// StatusOk means generate proof success
StatusOk RespStatus = iota
// StatusProofError means generate proof failed
StatusProofError
)
// ProofType represents the type of prover.
type ProofType uint8
func (r ProofType) String() string {
@@ -21,8 +44,6 @@ func (r ProofType) String() string {
return "proof type chunk"
case ProofTypeBatch:
return "proof type batch"
case ProofTypeBundle:
return "proof type bundle"
default:
return fmt.Sprintf("illegal proof type: %d", r)
}
@@ -31,14 +52,93 @@ func (r ProofType) String() string {
const (
// ProofTypeUndefined is an unknown proof type
ProofTypeUndefined ProofType = iota
// ProofTypeChunk generates a proof for a ZkEvm chunk, where the inputs are the execution traces for blocks contained in the chunk. ProofTypeChunk is the default proof type.
// ProofTypeChunk is the default prover type; it only generates a zk proof from traces.
ProofTypeChunk
// ProofTypeBatch generates a zk proof from chunk proofs
// ProofTypeBatch generates a zk proof from other zk proofs and aggregates them into one proof.
ProofTypeBatch
// ProofTypeBundle generates a zk proof from batch proofs
ProofTypeBundle
)
// GenerateToken generates a random hex-encoded token
func GenerateToken() (string, error) {
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
return "", err
}
return hex.EncodeToString(b), nil
}
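GenerateToken draws 16 bytes from crypto/rand and hex-encodes them, so the returned challenge string is always 32 characters long; the test file later in this diff asserts exactly that. A minimal usage sketch, under the same import-path assumption as above:

package main

import (
	"fmt"

	"scroll-tech/common/types/message" // assumed import path
)

func main() {
	token, err := message.GenerateToken()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(token)) // always 32: 16 random bytes, hex-encoded
}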
// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
*ProofDetail `json:"zkProof"`
// Prover signature
Signature string `json:"signature"`
// Prover public key
publicKey string
}
// Sign signs the ProofMsg.
func (a *ProofMsg) Sign(priv *ecdsa.PrivateKey) error {
hash, err := a.ProofDetail.Hash()
if err != nil {
return err
}
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies ProofMsg.Signature.
func (a *ProofMsg) Verify() (bool, error) {
hash, err := a.ProofDetail.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
// recover public key
if a.publicKey == "" {
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
}
return crypto.VerifySignature(common.FromHex(a.publicKey), hash, sig[:len(sig)-1]), nil
}
// PublicKey returns the public key recovered from the signature
func (a *ProofMsg) PublicKey() (string, error) {
if a.publicKey == "" {
hash, err := a.ProofDetail.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
return a.publicKey, nil
}
return a.publicKey, nil
}
// TaskMsg is a wrapper type around the db ProveTask type.
type TaskMsg struct {
UUID string `json:"uuid"`
ID string `json:"id"`
Type ProofType `json:"type,omitempty"`
BatchTaskDetail *BatchTaskDetail `json:"batch_task_detail,omitempty"`
ChunkTaskDetail *ChunkTaskDetail `json:"chunk_task_detail,omitempty"`
}
// ChunkTaskDetail is a type containing ChunkTask detail.
type ChunkTaskDetail struct {
BlockHashes []common.Hash `json:"block_hashes"`
@@ -46,31 +146,41 @@ type ChunkTaskDetail struct {
// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []ChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
KzgProof []byte `json:"kzg_proof"`
KzgCommitment []byte `json:"kzg_commitment"`
Challenge common.Hash `json:"challenge"`
ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []*ChunkProof `json:"chunk_proofs"`
}
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
BatchProofs []BatchProof `json:"batch_proofs"`
}
// ProofDetail is the message received from provers that contains the zk proof, the status of
// proof generation, and an error message if proof generation failed.
type ProofDetail struct {
ID string `json:"id"`
Type ProofType `json:"type,omitempty"`
Status RespStatus `json:"status"`
ChunkProof *ChunkProof `json:"chunk_proof,omitempty"`
BatchProof *BatchProof `json:"batch_proof,omitempty"`
Error string `json:"error,omitempty"`
}
// Hash returns the ProofDetail content hash.
func (z *ProofDetail) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(z)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
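Hash digests the RLP encoding of the whole ProofDetail, so mutating any field invalidates a signature made over the earlier digest. A sketch, assuming the *ChunkProof-pointer variant of ProofDetail shown in this hunk and the same import path as above:

package main

import (
	"bytes"
	"fmt"

	"scroll-tech/common/types/message" // assumed import path
)

func main() {
	detail := &message.ProofDetail{
		ID:     "task-1", // hypothetical values
		Type:   message.ProofTypeChunk,
		Status: message.StatusOk,
	}
	h1, err := detail.Hash()
	if err != nil {
		panic(err)
	}
	detail.Status = message.StatusProofError // any mutation changes the digest
	h2, _ := detail.Hash()
	fmt.Println(bytes.Equal(h1, h2)) // false: a signature over h1 no longer verifies
}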
// ChunkInfo is used for calculating the pi_hash of a chunk
type ChunkInfo struct {
ChainID uint64 `json:"chain_id"`
PrevStateRoot common.Hash `json:"prev_state_root"`
PostStateRoot common.Hash `json:"post_state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
DataHash common.Hash `json:"data_hash"`
IsPadding bool `json:"is_padding"`
TxBytes []byte `json:"tx_bytes"`
TxBytesHash common.Hash `json:"tx_data_digest"`
PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
ChainID uint64 `json:"chain_id"`
PrevStateRoot common.Hash `json:"prev_state_root"`
PostStateRoot common.Hash `json:"post_state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
DataHash common.Hash `json:"data_hash"`
IsPadding bool `json:"is_padding"`
TxBytes []byte `json:"tx_bytes"`
}
// SubCircuitRowUsage is tracing info added in v0.11.0rc8
@@ -79,26 +189,11 @@ type SubCircuitRowUsage struct {
RowNumber uint64 `json:"row_number"`
}
// ChunkProof is the interface implemented by every chunk proof variant.
type ChunkProof interface {
Proof() []byte
}
// NewChunkProof creates a new ChunkProof instance.
func NewChunkProof(hardForkName string) ChunkProof {
switch hardForkName {
case euclidFork:
return &OpenVMChunkProof{}
default:
return &Halo2ChunkProof{}
}
}
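Because ChunkProof is an interface, callers pick the concrete type by hard-fork name and then deserialize into it. A sketch of the intended decode path (the import path is assumed, and raw stands for the JSON blob a prover would submit):

package main

import (
	"encoding/json"
	"fmt"

	"scroll-tech/common/types/message" // assumed import path
)

// decodeChunkProof selects *OpenVMChunkProof for "euclid" and *Halo2ChunkProof
// otherwise, then unmarshals the prover's JSON into it.
func decodeChunkProof(hardForkName string, raw []byte) (message.ChunkProof, error) {
	proof := message.NewChunkProof(hardForkName)
	if err := json.Unmarshal(raw, proof); err != nil {
		return nil, err
	}
	return proof, nil
}

func main() {
	proof, err := decodeChunkProof("euclid", []byte(`{"git_version":"test"}`))
	fmt.Println(proof != nil, err) // true <nil>
}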
// Halo2ChunkProof includes the proof info that is required for chunk verification and rollup.
type Halo2ChunkProof struct {
// ChunkProof includes the proof info that is required for chunk verification and rollup.
type ChunkProof struct {
StorageTrace []byte `json:"storage_trace,omitempty"`
Protocol []byte `json:"protocol"`
RawProof []byte `json:"proof"`
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
@@ -107,279 +202,27 @@ type Halo2ChunkProof struct {
RowUsages []SubCircuitRowUsage `json:"row_usages,omitempty"`
}
// Proof returns the proof bytes of a ChunkProof
func (ap *Halo2ChunkProof) Proof() []byte {
return ap.RawProof
}
// BatchProof is the interface implemented by every batch proof variant.
type BatchProof interface {
SanityCheck() error
Proof() []byte
}
// NewBatchProof creates a new BatchProof instance.
func NewBatchProof(hardForkName string) BatchProof {
switch hardForkName {
case euclidFork:
return &OpenVMBatchProof{}
default:
return &Halo2BatchProof{}
}
}
// Halo2BatchProof includes the proof info that is required for batch verification and rollup.
type Halo2BatchProof struct {
Protocol []byte `json:"protocol"`
RawProof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
BatchHash common.Hash `json:"batch_hash"`
GitVersion string `json:"git_version,omitempty"`
}
// Proof returns the proof bytes of a BatchProof
func (ap *Halo2BatchProof) Proof() []byte {
return ap.RawProof
}
// SanityCheck checks whether a Halo2BatchProof is in a legal format
func (ap *Halo2BatchProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if len(ap.RawProof) == 0 {
return errors.New("proof not ready")
}
if len(ap.RawProof)%32 != 0 {
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
}
if len(ap.Instances) == 0 {
return errors.New("instance not ready")
}
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
}
return nil
}
// BundleProof is the interface implemented by every bundle proof variant.
type BundleProof interface {
SanityCheck() error
Proof() []byte
}
// NewBundleProof creates a new BundleProof instance.
func NewBundleProof(hardForkName string) BundleProof {
switch hardForkName {
case euclidFork:
return &OpenVMBundleProof{}
default:
return &Halo2BundleProof{}
}
}
// Halo2BundleProof includes the proof info that is required for verification of a bundle of batch proofs.
type Halo2BundleProof struct {
RawProof []byte `json:"proof"`
// BatchProof includes the proof info that is required for batch verification and rollup.
type BatchProof struct {
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
GitVersion string `json:"git_version,omitempty"`
}
// Proof returns the proof bytes of a BundleProof
func (ap *Halo2BundleProof) Proof() []byte {
return ap.RawProof
}
// SanityCheck checks whether a Halo2BundleProof is in a legal format
func (ap *Halo2BundleProof) SanityCheck() error {
// SanityCheck checks whether a BatchProof is in a legal format
// TODO: change to check Proof&Instance when upgrading to snark verifier v0.4
func (ap *BatchProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if len(ap.RawProof) == 0 {
if len(ap.Proof) == 0 {
return errors.New("proof not ready")
}
if len(ap.RawProof)%32 != 0 {
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.RawProof))
}
if len(ap.Instances) == 0 {
return errors.New("instance not ready")
}
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
}
return nil
}
// OpenVMProof is a flattened VM proof
type OpenVMProof struct {
Proof []byte `json:"proofs"`
PublicValues []byte `json:"public_values"`
}
// OpenVMEvmProof is a flattened EVM proof
type OpenVMEvmProof struct {
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`
}
// OpenVMChunkProof includes the proof info that is required for chunk verification and rollup.
type OpenVMChunkProof struct {
MetaData struct {
ChunkInfo *ChunkInfo `json:"chunk_info"`
} `json:"metadata"`
VmProof *OpenVMProof `json:"proof"`
Vk []byte `json:"vk,omitempty"`
GitVersion string `json:"git_version,omitempty"`
}
func (p *OpenVMChunkProof) Proof() []byte {
proofJson, err := json.Marshal(p.VmProof)
if err != nil {
panic(fmt.Sprint("marshaling error", err))
}
return proofJson
}
// OpenVMBatchInfo is used for calculating the pi_hash of a batch header
type OpenVMBatchInfo struct {
ParentBatchHash common.Hash `json:"parent_batch_hash"`
ParentStateRoot common.Hash `json:"parent_state_root"`
StateRoot common.Hash `json:"state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
BatchHash common.Hash `json:"batch_hash"`
ChainID uint64 `json:"chain_id"`
}
// OpenVMBatchProof includes the proof info that is required for batch verification and rollup.
type OpenVMBatchProof struct {
MetaData struct {
BatchInfo *OpenVMBatchInfo `json:"batch_info"`
BatchHash common.Hash `json:"batch_hash"`
} `json:"metadata"`
VmProof *OpenVMProof `json:"proof"`
Vk []byte `json:"vk,omitempty"`
GitVersion string `json:"git_version,omitempty"`
}
func (p *OpenVMBatchProof) Proof() []byte {
proofJson, err := json.Marshal(p.VmProof)
if err != nil {
panic(fmt.Sprint("marshaling error", err))
}
return proofJson
}
// SanityCheck checks whether an OpenVMBatchProof is in a legal format
func (ap *OpenVMBatchProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if ap.MetaData.BatchInfo == nil {
return errors.New("batch info not ready")
}
if ap.VmProof == nil {
return errors.New("proof not ready")
} else {
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
}
pf := ap.VmProof
if pf.Proof == nil {
return errors.New("proof data not ready")
}
if len(pf.PublicValues) == 0 {
return errors.New("proof public value not ready")
}
}
return nil
}
// OpenVMBundleInfo is used for calculating the pi_hash of a bundle header
type OpenVMBundleInfo struct {
ChainID uint64 `json:"chain_id"`
PrevStateRoot common.Hash `json:"prev_state_root"`
PostStateRoot common.Hash `json:"post_state_root"`
WithdrawRoot common.Hash `json:"withdraw_root"`
NumBatches uint32 `json:"num_batches"`
PrevBatchHash common.Hash `json:"prev_batch_hash"`
BatchHash common.Hash `json:"batch_hash"`
}
// OpenVMBundleProof includes the proof info that is required for verification of a bundle of batch proofs.
type OpenVMBundleProof struct {
MetaData struct {
BundleInfo *OpenVMBundleInfo `json:"bundle_info"`
BunndlePIHash common.Hash `json:"bundle_pi_hash"`
} `json:"metadata"`
EvmProof *OpenVMEvmProof `json:"proof"`
Vk []byte `json:"vk,omitempty"`
GitVersion string `json:"git_version,omitempty"`
}
// Proof returns the proof bytes that are eventually passed as calldata for on-chain bundle proof verification.
//
// There are 12 accumulators for a SNARK proof. The accumulators are the first 12 elements of the EvmProof's
// Instances field. The remaining items in Instances are supplied on-chain by the ScrollChain contract.
//
// The structure of these bytes is:
// | byte index start | byte length | value | description |
// |------------------|----------------|----------|---------------------|
// | 0 | 32 | accs[0] | accumulator 1 |
// | 32 | 32 | accs[1] | accumulator 2 |
// | 32*i ... | 32 | accs[i] | accumulator i ... |
// | 352 | 32 | accs[11] | accumulator 12 |
// | 384 | dynamic | proof | proof bytes |
func (p *OpenVMBundleProof) Proof() []byte {
proofBytes := make([]byte, 0, 384+len(p.EvmProof.Proof))
proofBytes = append(proofBytes, p.EvmProof.Instances[:384]...)
return append(proofBytes, p.EvmProof.Proof...)
}
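The inverse of that layout is a straightforward split. A sketch that undoes (*OpenVMBundleProof).Proof, with a length guard worth noting: Proof itself will panic if EvmProof.Instances holds fewer than 384 bytes, and SanityCheck below only verifies that Instances is non-empty, not its length.

package main

import "fmt"

// splitBundleCalldata splits the on-chain calldata back into the 12
// accumulators (12 x 32 = 384 bytes) and the trailing proof bytes,
// following the layout table above.
func splitBundleCalldata(calldata []byte) (accumulators, proof []byte, err error) {
	if len(calldata) < 384 {
		return nil, nil, fmt.Errorf("calldata too short: %d bytes", len(calldata))
	}
	return calldata[:384], calldata[384:], nil
}

func main() {
	accs, proof, err := splitBundleCalldata(make([]byte, 384+64))
	fmt.Println(len(accs), len(proof), err) // 384 64 <nil>
}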
// SanityCheck checks whether an OpenVMBundleProof is in a legal format
func (ap *OpenVMBundleProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if ap.MetaData.BundleInfo == nil {
return errors.New("bundle info not ready")
}
if ap.EvmProof == nil {
return errors.New("proof not ready")
} else {
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
}
pf := ap.EvmProof
if len(pf.Proof)%32 != 0 {
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(pf.Proof))
}
if len(pf.Instances) == 0 {
return errors.New("instance not ready")
}
if len(ap.Proof)%32 != 0 {
return fmt.Errorf("proof buffer has wrong length, expected: 32, got: %d", len(ap.Proof))
}
return nil

View File

@@ -0,0 +1,158 @@
package message
import (
"encoding/hex"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
)
func TestAuthMessageSignAndVerify(t *testing.T) {
privkey, err := crypto.GenerateKey()
assert.NoError(t, err)
authMsg := &AuthMsg{
Identity: &Identity{
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzgxNzUsIm9yaWdfaWF0IjoxNjkxMDM0NTc1fQ.HybBMsEJFhyZqtIa2iVcHUP7CEFttf708jmTMAImAWA",
ProverName: "test",
ProverVersion: "v1.0.0",
},
}
assert.NoError(t, authMsg.SignWithKey(privkey))
// Check public key.
pk, err := authMsg.PublicKey()
assert.NoError(t, err)
assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)
ok, err := authMsg.Verify()
assert.NoError(t, err)
assert.Equal(t, true, ok)
// Check public key is ok.
pub, err := authMsg.PublicKey()
assert.NoError(t, err)
pubkey := crypto.CompressPubkey(&privkey.PublicKey)
assert.Equal(t, pub, common.Bytes2Hex(pubkey))
}
func TestGenerateToken(t *testing.T) {
token, err := GenerateToken()
assert.NoError(t, err)
assert.Equal(t, 32, len(token))
}
func TestIdentityHash(t *testing.T) {
identity := &Identity{
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzM0MTksIm9yaWdfaWF0IjoxNjkxMDI5ODE5fQ.EhkLZsj__rNPVC3ZDYBtvdh0nB8mmM_Hl82hObaIWOs",
ProverName: "test",
ProverVersion: "v1.0.0",
}
hash, err := identity.Hash()
assert.NoError(t, err)
expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
func TestProofMessageSignVerifyPublicKey(t *testing.T) {
privkey, err := crypto.GenerateKey()
assert.NoError(t, err)
proofMsg := &ProofMsg{
ProofDetail: &ProofDetail{
ID: "testID",
Type: ProofTypeChunk,
Status: StatusOk,
ChunkProof: &ChunkProof{
StorageTrace: []byte("testStorageTrace"),
Protocol: []byte("testProtocol"),
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
},
}
assert.NoError(t, proofMsg.Sign(privkey))
// Test when publicKey is not set.
ok, err := proofMsg.Verify()
assert.NoError(t, err)
assert.Equal(t, true, ok)
// Test when publicKey is already set.
ok, err = proofMsg.Verify()
assert.NoError(t, err)
assert.Equal(t, true, ok)
}
func TestProofDetailHash(t *testing.T) {
proofDetail := &ProofDetail{
ID: "testID",
Type: ProofTypeChunk,
Status: StatusOk,
ChunkProof: &ChunkProof{
StorageTrace: []byte("testStorageTrace"),
Protocol: []byte("testProtocol"),
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
}
hash, err := proofDetail.Hash()
assert.NoError(t, err)
expectedHash := "01128ea9006601146ba80dbda959c96ebaefca463e78570e473a57d821db5ec1"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
func TestProveTypeString(t *testing.T) {
proofTypeChunk := ProofType(1)
assert.Equal(t, "proof type chunk", proofTypeChunk.String())
proofTypeBatch := ProofType(2)
assert.Equal(t, "proof type batch", proofTypeBatch.String())
illegalProof := ProofType(3)
assert.Equal(t, "illegal proof type: 3", illegalProof.String())
}
func TestProofMsgPublicKey(t *testing.T) {
privkey, err := crypto.GenerateKey()
assert.NoError(t, err)
proofMsg := &ProofMsg{
ProofDetail: &ProofDetail{
ID: "testID",
Type: ProofTypeChunk,
Status: StatusOk,
ChunkProof: &ChunkProof{
StorageTrace: []byte("testStorageTrace"),
Protocol: []byte("testProtocol"),
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
},
}
assert.NoError(t, proofMsg.Sign(privkey))
// Test when publicKey is not set.
pk, err := proofMsg.PublicKey()
assert.NoError(t, err)
assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)
// Test when publicKey is already set.
proofMsg.publicKey = common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey))
pk, err = proofMsg.PublicKey()
assert.NoError(t, err)
assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)
}

View File

@@ -21,7 +21,6 @@ var (
// RollupRelayerFlags contains flags only used in rollup-relayer
RollupRelayerFlags = []cli.Flag{
&ImportGenesisFlag,
&MinCodecVersionFlag,
}
// ConfigFileFlag loads the json config file.
ConfigFileFlag = cli.StringFlag{
@@ -91,10 +90,4 @@ var (
Usage: "Genesis file of the network",
Value: "./conf/genesis.json",
}
// MinCodecVersionFlag defines the minimum codec version required for the chunk/batch/bundle proposers
MinCodecVersionFlag = cli.UintFlag{
Name: "min-codec-version",
Usage: "Minimum required codec version for the chunk/batch/bundle proposers",
Required: true,
}
)

View File

@@ -2,7 +2,6 @@ package utils
import (
"crypto/ecdsa"
"errors"
"fmt"
"os"
"path/filepath"
@@ -29,7 +28,7 @@ func LoadOrCreateKey(keystorePath string, keystorePassword string) (*ecdsa.Priva
} else if err != nil {
return nil, err
} else if fi.IsDir() {
return nil, errors.New("keystorePath cannot be a dir")
return nil, fmt.Errorf("keystorePath cannot be a dir")
}
keyjson, err := os.ReadFile(filepath.Clean(keystorePath))

View File

@@ -9,14 +9,10 @@ import (
"math/big"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"time"
"github.com/modern-go/reflect2"
"github.com/scroll-tech/go-ethereum/core"
"github.com/scroll-tech/go-ethereum/log"
)
// TryTimes retries the function several times until it returns true.
@@ -82,89 +78,3 @@ func ReadGenesis(genesisPath string) (*core.Genesis, error) {
}
return genesis, file.Close()
}
// OverrideConfigWithEnv recursively overrides config values with environment variables
func OverrideConfigWithEnv(cfg interface{}, prefix string) error {
v := reflect.ValueOf(cfg)
if v.Kind() != reflect.Ptr || v.IsNil() {
return nil
}
v = v.Elem()
t := v.Type()
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
fieldValue := v.Field(i)
if !fieldValue.CanSet() {
continue
}
tag := field.Tag.Get("json")
if tag == "" {
tag = strings.ToLower(field.Name)
}
envKey := prefix + "_" + strings.ToUpper(tag)
switch fieldValue.Kind() {
case reflect.Ptr:
if !fieldValue.IsNil() {
err := OverrideConfigWithEnv(fieldValue.Interface(), envKey)
if err != nil {
return err
}
}
case reflect.Struct:
err := OverrideConfigWithEnv(fieldValue.Addr().Interface(), envKey)
if err != nil {
return err
}
default:
if envValue, exists := os.LookupEnv(envKey); exists {
log.Info("Overriding config with env var", "key", envKey)
err := setField(fieldValue, envValue)
if err != nil {
return err
}
}
}
}
return nil
}
// setField sets the value of a field based on the environment variable value
func setField(field reflect.Value, value string) error {
switch field.Kind() {
case reflect.String:
field.SetString(value)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
intValue, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return err
}
field.SetInt(intValue)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
uintValue, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return err
}
field.SetUint(uintValue)
case reflect.Float32, reflect.Float64:
floatValue, err := strconv.ParseFloat(value, 64)
if err != nil {
return err
}
field.SetFloat(floatValue)
case reflect.Bool:
boolValue, err := strconv.ParseBool(value)
if err != nil {
return err
}
field.SetBool(boolValue)
default:
return fmt.Errorf("unsupported type: %v", field.Kind())
}
return nil
}
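A usage sketch for the env-override pair above. Keys are built as prefix + "_" + upper-cased json tag, recursing through pointer and struct fields; the config shapes here are hypothetical stand-ins for the coordinator's JSON-tagged structs, and OverrideConfigWithEnv is assumed to be in scope (in the repo it lives in the utils package).

package main

import (
	"fmt"
	"os"
)

type DBConfig struct {
	DriverName string `json:"driver_name"`
	MaxOpenNum int    `json:"max_open_num"`
}

type Config struct {
	DB *DBConfig `json:"db"`
}

func main() {
	// The nested field resolves to SCROLL_COORDINATOR_DB_DRIVER_NAME.
	os.Setenv("SCROLL_COORDINATOR_DB_DRIVER_NAME", "postgres")
	cfg := &Config{DB: &DBConfig{DriverName: "sqlite", MaxOpenNum: 10}}
	if err := OverrideConfigWithEnv(cfg, "SCROLL_COORDINATOR"); err != nil {
		panic(err)
	}
	fmt.Println(cfg.DB.DriverName, cfg.DB.MaxOpenNum) // postgres 10 (untouched fields keep their values)
}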

View File

@@ -9,10 +9,6 @@ import (
// CheckScrollProverVersion checks the "scroll-prover" version; if it differs from the local one, it returns false
func CheckScrollProverVersion(proverVersion string) bool {
if strings.HasPrefix(proverVersion, "sdk") {
return CheckProverSDKVersion(proverVersion)
}
// note the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
@@ -27,18 +23,8 @@ func CheckScrollProverVersion(proverVersion string) bool {
return remote[2] == local[2]
}
// CheckProverSDKVersion checks the prover SDK version; it simply returns true for now,
// and more checks will be added as we evolve.
func CheckProverSDKVersion(proverVersion string) bool {
return true
}
// CheckScrollRepoVersion checks if the proverVersion is at least the minimum required version.
func CheckScrollRepoVersion(proverVersion, minVersion string) bool {
if strings.HasPrefix(proverVersion, "sdk") {
return CheckProverSDKWithMinVersion(proverVersion, minVersion)
}
c, err := semver.NewConstraint(">= " + minVersion + "-0")
if err != nil {
log.Error("failed to initialize constraint", "minVersion", minVersion, "error", err)
@@ -53,9 +39,3 @@ func CheckScrollRepoVersion(proverVersion, minVersion string) bool {
return c.Check(v)
}
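The "-0" suffix in the constraint matters: with the Masterminds semver library (assumed here; its API matches the NewConstraint/NewVersion/Check calls above), a plain ">= 4.4.45" constraint rejects every pre-release tag, while appending the lowest possible pre-release identifier lets versions like v4.4.45-rc1 pass. A sketch:

package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3" // assumed dependency
)

func main() {
	c, err := semver.NewConstraint(">= 4.4.45-0")
	if err != nil {
		panic(err)
	}
	for _, tag := range []string{"v4.4.44", "v4.4.45-rc1", "v4.4.46"} {
		v, verr := semver.NewVersion(tag)
		if verr != nil {
			panic(verr)
		}
		fmt.Println(tag, c.Check(v)) // false, true, true
	}
}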
// CheckProverSDKWithMinVersion checks that the prover SDK version is at least the minimum required version;
// it simply returns true for now, and more checks will be added as we evolve.
func CheckProverSDKWithMinVersion(proverVersion string, minVersion string) bool {
return true
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.4.97"
var tag = "v4.4.23"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -26,9 +26,6 @@ coordinator_api: libzkp ## Builds the Coordinator api instance.
coordinator_cron:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
coordinator_tool:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool
coordinator_api_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
@@ -57,4 +54,4 @@ docker:
docker_push:
docker push scrolltech/coordinator-api:${IMAGE_VERSION}
docker push scrolltech/coordinator-cron:${IMAGE_VERSION}
docker push scrolltech/coordinator-cron:${IMAGE_VERSION}

View File

@@ -88,25 +88,13 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
}
// Reset prover manager config for manager test cases.
cfg.ProverManager = &coordinatorConfig.ProverManager{
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{
MockMode: true,
LowVersionCircuit: &coordinatorConfig.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "darwin",
MinProverVersion: "v4.2.0",
},
HighVersionCircuit: &coordinatorConfig.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "darwinV2",
MinProverVersion: "v4.3.0",
},
},
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
BatchCollectionTimeSec: 60,
ChunkCollectionTimeSec: 60,
SessionAttempts: 10,
MaxVerifierWorkers: 4,
MinProverVersion: "v1.0.0",
}
endpoint, err := c.testApps.GetDBEndPoint()
if err != nil {

View File

@@ -1,101 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
var app *cli.App
func init() {
// Set up coordinator app info.
app = cli.NewApp()
app.Action = action
app.Name = "coordinator-tool"
app.Usage = "The Scroll L2 Coordinator Tool"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
if err = database.CloseDB(db); err != nil {
log.Error("can not close db connection", "error", err)
}
}()
batchOrm := orm.NewBatch(db)
taskID := "fa9a290c8f1a46dc626fa67d626fadfe4803968ce776383996f3ae12504a2591"
batches, err := batchOrm.GetBatchesByBundleHash(ctx.Context, taskID)
if err != nil {
log.Error("failed to get batch proofs for batch", "task_id", taskID, "error", err)
return err
}
if len(batches) == 0 {
log.Error("failed to get batch proofs for bundle, not found batch", "task_id", taskID)
return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID)
}
var batchProofs []message.BatchProof
for _, batch := range batches {
proof := message.NewBatchProof("darwinV2")
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
log.Error("failed to unmarshal batch proof")
return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash)
}
batchProofs = append(batchProofs, proof)
}
taskDetail := message.BundleTaskDetail{
BatchProofs: batchProofs,
}
batchProofsBytes, err := json.Marshal(taskDetail)
if err != nil {
log.Error("failed to marshal batch proof")
return fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", taskID, err)
}
taskMsg := &coordinatorType.GetTaskSchema{
TaskID: taskID,
TaskType: int(message.ProofTypeBundle),
TaskData: string(batchProofsBytes),
}
log.Info("task_msg", "data", taskMsg)
return nil
}
func main() {
// Run the coordinator tool.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -2,25 +2,16 @@
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 5,
"external_prover_threshold": 32,
"bundle_collection_time_sec": 180,
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"fork_name": "bernoulli",
"mock_mode": true,
"low_version_circuit": {
"params_path": "params",
"assets_path": "assets",
"fork_name": "darwin",
"min_prover_version": "v4.4.43"
},
"high_version_circuit": {
"params_path": "params",
"assets_path": "assets",
"fork_name": "darwinV2",
"min_prover_version": "v4.4.45"
}
}
"params_path": "",
"assets_path": ""
},
"max_verifier_workers": 4,
"min_prover_version": "v1.0.0"
},
"db": {
"driver_name": "postgres",
@@ -33,7 +24,7 @@
},
"auth": {
"secret": "prover secret key",
"challenge_expire_duration_sec": 3600,
"challenge_expire_duration_sec": 10,
"login_expire_duration_sec": 3600
}
}

View File

@@ -1,20 +1,15 @@
module scroll-tech/coordinator
go 1.22
toolchain go1.22.2
go 1.21
require (
github.com/appleboy/gin-jwt/v2 v2.9.1
github.com/gin-gonic/gin v1.9.1
github.com/go-resty/resty/v2 v2.7.0
github.com/google/uuid v1.6.0
github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.10.0
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
golang.org/x/arch v0.5.0 // indirect
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
@@ -42,43 +37,48 @@ require (
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/net v0.20.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
)
require (
github.com/google/uuid v1.6.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/bits-and-blooms/bitset v1.12.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/consensys/bavard v0.1.29 // indirect
github.com/consensys/gnark-crypto v0.16.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/ethereum/c-kzg-4844 v1.0.3 // indirect
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/holiman/uint256 v1.3.2 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/holiman/uint256 v1.2.4 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/supranational/blst v0.3.13 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.17.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

View File

@@ -1,16 +1,14 @@
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38=
github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/appleboy/gin-jwt/v2 v2.9.1 h1:l29et8iLW6omcHltsOP6LLk4s3v4g2FbFs0koxGWVZs=
github.com/appleboy/gin-jwt/v2 v2.9.1/go.mod h1:jwcPZJ92uoC9nOUTOKWoN/f6JZOgMSKlFSHw5/FrRUk=
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -32,21 +30,21 @@ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
github.com/consensys/bavard v0.1.29 h1:fobxIYksIQ+ZSrTJUuQgu+HIJwclrAPcdXqd7H2hh1k=
github.com/consensys/bavard v0.1.29/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo=
github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4=
github.com/crate-crypto/go-kzg-4844 v1.1.0/go.mod h1:JolLjpSff1tCCJKaJx4psrlEdlXuJEC996PL3tTAFks=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs=
github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
@@ -95,11 +93,11 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA=
github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
@@ -109,8 +107,6 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -124,8 +120,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
@@ -173,16 +169,16 @@ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435 h1:X9fkvjrYBY79lGgKEPpUhuiJ4vWpWwzOVw4H8CU8L54=
github.com/scroll-tech/da-codec v0.1.3-0.20250310095435-012aaee6b435/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570 h1:2oA2bAFPQXDZcUK8TA9qd5zj6AsURpHyBaAha5goP0c=
github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
@@ -198,10 +194,10 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
@@ -210,10 +206,10 @@ github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
@@ -225,8 +221,8 @@ github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6S
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
@@ -236,8 +232,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -247,13 +243,13 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -269,8 +265,10 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
@@ -279,8 +277,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=

View File

@@ -6,7 +6,6 @@ import (
"path/filepath"
"scroll-tech/common/database"
"scroll-tech/common/utils"
)
// ProverManager loads prover manager configuration items.
@@ -16,16 +15,16 @@ type ProverManager struct {
// Number of attempts that a session can be retried if previous attempts failed.
// Currently we only consider proving timeout as failure here.
SessionAttempts uint8 `json:"session_attempts"`
// Threshold for activating the external prover based on unassigned task count.
ExternalProverThreshold int64 `json:"external_prover_threshold"`
// Zk verifier config.
Verifier *VerifierConfig `json:"verifier"`
// BatchCollectionTimeSec batch Proof collection time (in seconds).
BatchCollectionTimeSec int `json:"batch_collection_time_sec"`
// ChunkCollectionTimeSec chunk Proof collection time (in seconds).
ChunkCollectionTimeSec int `json:"chunk_collection_time_sec"`
// BundleCollectionTimeSec bundle Proof collection time (in seconds).
BundleCollectionTimeSec int `json:"bundle_collection_time_sec"`
// Max number of workers in verifier worker pool
MaxVerifierWorkers int `json:"max_verifier_workers"`
// MinProverVersion is the minimum version of the prover that is required.
MinProverVersion string `json:"min_prover_version"`
}
// L2 loads l2geth configuration items.
@@ -49,19 +48,12 @@ type Config struct {
Auth *Auth `json:"auth"`
}
// CircuitConfig loads circuit configuration items.
type CircuitConfig struct {
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
ForkName string `json:"fork_name"`
MinProverVersion string `json:"min_prover_version"`
}
// VerifierConfig loads the zk verifier config.
type VerifierConfig struct {
MockMode bool `json:"mock_mode"`
LowVersionCircuit *CircuitConfig `json:"low_version_circuit"`
HighVersionCircuit *CircuitConfig `json:"high_version_circuit"`
ForkName string `json:"fork_name"`
MockMode bool `json:"mock_mode"`
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
}
// NewConfig returns a new instance of Config.
@@ -77,11 +69,5 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
// Override config with environment variables
err = utils.OverrideConfigWithEnv(cfg, "SCROLL_COORDINATOR")
if err != nil {
return nil, err
}
return cfg, nil
}

View File

@@ -1,16 +1,15 @@
package api
import (
"errors"
"fmt"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/logic/auth"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/types"
)
@@ -20,9 +19,9 @@ type AuthController struct {
}
// NewAuthController returns an AuthController instance
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController {
func NewAuthController(db *gorm.DB) *AuthController {
return &AuthController{
loginLogic: auth.NewLoginLogic(db, cfg, vf),
loginLogic: auth.NewLoginLogic(db),
}
}
@@ -37,16 +36,7 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
// if it does not exist, the jwt middleware will intercept it
bearerToken := c.GetHeader("Authorization")
if bearerToken != "Bearer "+login.Message.Challenge {
return "", errors.New("check challenge failure: challenge string mismatch")
}
if err := a.loginLogic.Check(&login); err != nil {
return "", fmt.Errorf("check the login parameter failure: %w", err)
}
hardForkNames, err := a.loginLogic.ProverHardForkName(&login)
if err != nil {
return "", fmt.Errorf("prover hard fork name failure:%w", err)
return "", fmt.Errorf("check challenge failure for the not equal challenge string")
}
// check whether the challenge has already been used; if so, return failure
@@ -54,27 +44,54 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
return "", fmt.Errorf("login insert challenge string failure:%w", err)
}
returnData := types.LoginParameterWithHardForkName{
HardForkName: hardForkNames,
LoginParameter: login,
}
return returnData, nil
return login, nil
}
// PayloadFunc returns jwt.MapClaims with {public key, prover name}.
func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
v, ok := data.(types.LoginParameterWithHardForkName)
v, ok := data.(types.LoginParameter)
if !ok {
return jwt.MapClaims{}
}
var publicKey string
var err error
if v.Message.HardForkName != "" {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
HardForkName: v.Message.HardForkName,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
} else {
authMsg := message.LegacyAuthMsg{
Identity: &message.LegacyIdentity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
}
if err != nil {
return jwt.MapClaims{}
}
if v.Message.HardForkName == "" {
v.Message.HardForkName = "shanghai"
}
return jwt.MapClaims{
types.HardForkName: v.HardForkName,
types.PublicKey: v.PublicKey,
types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion,
types.ProverProviderTypeKey: v.Message.ProverProviderType,
types.PublicKey: publicKey,
types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion,
types.HardForkName: v.Message.HardForkName,
}
}
@@ -96,10 +113,5 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
if hardForkName, ok := claims[types.HardForkName]; ok {
c.Set(types.HardForkName, hardForkName)
}
if providerType, ok := claims[types.ProverProviderTypeKey]; ok {
c.Set(types.ProverProviderTypeKey, providerType)
}
return nil
}

View File

@@ -26,9 +26,9 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
panic("proof receiver new verifier failure")
}
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap, "bundleVerifier", vf.BundleVkMap, "openVmVerifier", vf.OpenVMVkMap)
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)
Auth = NewAuthController(db, cfg, vf)
GetTask = NewGetTaskController(cfg, chainCfg, db, reg)
SubmitProof = NewSubmitProofController(cfg, chainCfg, db, vf, reg)
Auth = NewAuthController(db)
GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
}

View File

@@ -1,7 +1,6 @@
package api
import (
"errors"
"fmt"
"math/rand"
@@ -17,6 +16,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types"
)
@@ -28,10 +28,9 @@ type GetTaskController struct {
}
// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, reg)
bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, reg)
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg)
ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
@@ -43,22 +42,22 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
ptc.proverTasks[message.ProofTypeBatch] = batchProverTask
ptc.proverTasks[message.ProofTypeBundle] = bundleProverTask
return ptc
}
func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return errors.New("get public key from context failed")
return fmt.Errorf("get public key from context failed")
}
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return errors.New("get prover name from context failed")
return fmt.Errorf("get prover name from context failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return errors.New("get prover version from context failed")
return fmt.Errorf("get prover version from context failed")
}
ptc.getTaskAccessCounter.With(prometheus.Labels{
@@ -98,7 +97,7 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
}
if result == nil {
nerr := errors.New("get empty prover task")
nerr := fmt.Errorf("get empty prover task")
types.RenderFailure(ctx, types.ErrCoordinatorEmptyProofData, nerr)
return
}
@@ -107,21 +106,18 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
}
func (ptc *GetTaskController) proofType(para *coordinatorType.GetTaskParameter) message.ProofType {
var proofTypes []message.ProofType
for _, proofType := range para.TaskTypes {
proofTypes = append(proofTypes, message.ProofType(proofType))
proofType := message.ProofType(para.TaskType)
proofTypes := []message.ProofType{
message.ProofTypeChunk,
message.ProofTypeBatch,
}
if len(proofTypes) == 0 {
proofTypes = []message.ProofType{
message.ProofTypeChunk,
message.ProofTypeBatch,
message.ProofTypeBundle,
}
if proofType == message.ProofTypeUndefined {
rand.Shuffle(len(proofTypes), func(i, j int) {
proofTypes[i], proofTypes[j] = proofTypes[j], proofTypes[i]
})
proofType = proofTypes[0]
}
rand.Shuffle(len(proofTypes), func(i, j int) {
proofTypes[i], proofTypes[j] = proofTypes[j], proofTypes[i]
})
return proofTypes[0]
return proofType
}
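A runnable sketch of the selection logic above, with integer stand-ins for the message.ProofType values (an assumption, not the real constants): an empty request falls back to a shuffled default list; otherwise the requested types are shuffled and the first is used.

package main

import (
	"fmt"
	"math/rand"
)

// pickProofType models the fallback above: when the prover requests no
// task types, shuffle the full list and take the first entry.
func pickProofType(requested []int) int {
	if len(requested) == 0 {
		requested = []int{1 /* chunk */, 2 /* batch */, 3 /* bundle */}
	}
	rand.Shuffle(len(requested), func(i, j int) {
		requested[i], requested[j] = requested[j], requested[i]
	})
	return requested[0]
}

func main() {
	fmt.Println(pickProofType(nil))      // random pick among 1, 2, 3
	fmt.Println(pickProofType([]int{2})) // always 2
}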

View File

@@ -1,14 +1,15 @@
package api
import (
"encoding/json"
"fmt"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/submitproof"
@@ -22,9 +23,9 @@ type SubmitProofController struct {
}
// NewSubmitProofController creates the submit proof API controller instance
func NewSubmitProofController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *SubmitProofController {
func NewSubmitProofController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *SubmitProofController {
return &SubmitProofController{
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, chainCfg, db, vf, reg),
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db, vf, reg),
}
}
@@ -37,7 +38,36 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
return
}
if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, spp); err != nil {
proofMsg := message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: spp.TaskID,
Type: message.ProofType(spp.TaskType),
Status: message.RespStatus(spp.Status),
},
}
if spp.Status == int(message.StatusOk) {
switch message.ProofType(spp.TaskType) {
case message.ProofTypeChunk:
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
proofMsg.ChunkProof = &tmpChunkProof
case message.ProofTypeBatch:
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
proofMsg.BatchProof = &tmpBatchProof
}
}
if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {
nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr)
return
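A hedged, self-contained sketch of the unmarshal-by-task-type pattern above; chunkProof here is a stand-in struct, not the real message.ChunkProof.

package main

import (
	"encoding/json"
	"fmt"
)

// chunkProof is a stand-in for message.ChunkProof, which carries more fields.
type chunkProof struct {
	Proof string `json:"proof"`
}

func main() {
	raw := `{"proof":"0xabc"}` // what spp.Proof might look like
	var p chunkProof
	// Same pattern as the controller: choose the concrete proof type from
	// the task type, then unmarshal the raw JSON payload into it.
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		fmt.Println("invalid chunk proof parameter:", err)
		return
	}
	fmt.Println("decoded proof:", p.Proof)
}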

View File

@@ -23,55 +23,38 @@ type Collector struct {
db *gorm.DB
ctx context.Context
stopBundleTimeoutChan chan struct{}
stopChunkTimeoutChan chan struct{}
stopBatchTimeoutChan chan struct{}
stopBatchAllChunkReadyChan chan struct{}
stopBundleAllBatchReadyChan chan struct{}
stopCleanChallengeChan chan struct{}
stopChunkTimeoutChan chan struct{}
stopBatchTimeoutChan chan struct{}
stopBatchAllChunkReadyChan chan struct{}
stopCleanChallengeChan chan struct{}
proverTaskOrm *orm.ProverTask
bundleOrm *orm.Bundle
chunkOrm *orm.Chunk
batchOrm *orm.Batch
challenge *orm.Challenge
timeoutBundleCheckerRunTotal prometheus.Counter
bundleProverTaskTimeoutTotal prometheus.Counter
timeoutBatchCheckerRunTotal prometheus.Counter
batchProverTaskTimeoutTotal prometheus.Counter
timeoutChunkCheckerRunTotal prometheus.Counter
chunkProverTaskTimeoutTotal prometheus.Counter
checkBatchAllChunkReadyRunTotal prometheus.Counter
checkBundleAllBatchReadyRunTotal prometheus.Counter
timeoutBatchCheckerRunTotal prometheus.Counter
batchProverTaskTimeoutTotal prometheus.Counter
timeoutChunkCheckerRunTotal prometheus.Counter
chunkProverTaskTimeoutTotal prometheus.Counter
checkBatchAllChunkReadyRunTotal prometheus.Counter
}
// NewCollector creates a collector that periodically collects data to send to provers
func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prometheus.Registerer) *Collector {
c := &Collector{
cfg: cfg,
db: db,
ctx: ctx,
stopBundleTimeoutChan: make(chan struct{}),
stopChunkTimeoutChan: make(chan struct{}),
stopBatchTimeoutChan: make(chan struct{}),
stopBatchAllChunkReadyChan: make(chan struct{}),
stopBundleAllBatchReadyChan: make(chan struct{}),
stopCleanChallengeChan: make(chan struct{}),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
challenge: orm.NewChallenge(db),
cfg: cfg,
db: db,
ctx: ctx,
stopChunkTimeoutChan: make(chan struct{}),
stopBatchTimeoutChan: make(chan struct{}),
stopBatchAllChunkReadyChan: make(chan struct{}),
stopCleanChallengeChan: make(chan struct{}),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
challenge: orm.NewChallenge(db),
timeoutBundleCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_bundle_timeout_checker_run_total",
Help: "Total number of bundle timeout checker run.",
}),
bundleProverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_bundle_prover_task_timeout_total",
Help: "Total number of bundle timeout prover task.",
}),
timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_timeout_checker_run_total",
Help: "Total number of batch timeout checker run.",
@@ -92,17 +75,11 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
Name: "coordinator_check_batch_all_chunk_ready_run_total",
Help: "Total number of check batch all chunks ready total",
}),
checkBundleAllBatchReadyRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_check_bundle_all_batch_ready_run_total",
Help: "Total number of check bundle all batches ready total",
}),
}
go c.timeoutBundleProofTask()
go c.timeoutBatchProofTask()
go c.timeoutChunkProofTask()
go c.checkBatchAllChunkReady()
go c.checkBundleAllBatchReady()
go c.cleanupChallenge()
log.Info("Start coordinator cron successfully.")
@@ -114,45 +91,10 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
func (c *Collector) Stop() {
c.stopChunkTimeoutChan <- struct{}{}
c.stopBatchTimeoutChan <- struct{}{}
c.stopBundleTimeoutChan <- struct{}{}
c.stopBatchAllChunkReadyChan <- struct{}{}
c.stopCleanChallengeChan <- struct{}{}
}
// timeoutBundleProofTask periodically checks whether an assigned bundle task has timed out. If so, it restores the
// bundle task to unassigned so the bundle collector can retry it.
func (c *Collector) timeoutBundleProofTask() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("timeout bundle proof task panic error:%v", err)
log.Warn(nerr.Error())
}
}()
ticker := time.NewTicker(time.Second * 2)
for {
select {
case <-ticker.C:
c.timeoutBundleCheckerRunTotal.Inc()
timeout := time.Duration(c.cfg.ProverManager.BundleCollectionTimeSec) * time.Second
assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, message.ProofTypeBundle, timeout)
if err != nil {
log.Error("get unassigned session info failure", "error", err)
break
}
c.check(assignedProverTasks, c.bundleProverTaskTimeoutTotal)
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopBundleTimeoutChan:
log.Info("the coordinator timeoutBundleProofTask run loop exit")
return
}
}
}
// timeoutBatchProofTask periodically checks whether an assigned task has timed out. If so, it restores the
// chunk/batch task to unassigned so the batch/chunk collector can retry it.
func (c *Collector) timeoutBatchProofTask() {
@@ -260,16 +202,6 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
log.Error("update proving status failed failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
case message.ProofTypeBundle:
if err := c.bundleOrm.DecreaseActiveAttemptsByHash(c.ctx, assignedProverTask.TaskID, tx); err != nil {
log.Error("decrease bundle active attempts failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
if err := c.bundleOrm.UpdateProvingStatusFailed(c.ctx, assignedProverTask.TaskID, c.cfg.ProverManager.SessionAttempts, tx); err != nil {
log.Error("update proving status failed failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
}
return nil
@@ -336,60 +268,3 @@ func (c *Collector) checkBatchAllChunkReady() {
}
}
}
func (c *Collector) checkBundleAllBatchReady() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("check batch all batches ready panic error:%v", err)
log.Warn(nerr.Error())
}
}()
ticker := time.NewTicker(time.Second * 10)
for {
select {
case <-ticker.C:
c.checkBundleAllBatchReadyRunTotal.Inc()
page := 1
pageSize := 50
for {
offset := (page - 1) * pageSize
bundles, err := c.bundleOrm.GetUnassignedAndBatchesUnreadyBundles(c.ctx, offset, pageSize)
if err != nil {
log.Warn("checkBundleAllBatchReady GetUnassignedAndBatchesUnreadyBundles", "error", err)
break
}
for _, bundle := range bundles {
allReady, checkErr := c.batchOrm.CheckIfBundleBatchProofsAreReady(c.ctx, bundle.Hash)
if checkErr != nil {
log.Warn("checkBundleAllBatchReady CheckIfBundleBatchProofsAreReady failure", "error", checkErr, "hash", bundle.Hash)
continue
}
if !allReady {
continue
}
if updateErr := c.bundleOrm.UpdateBatchProofsStatusByBundleHash(c.ctx, bundle.Hash, types.BatchProofsStatusReady); updateErr != nil {
log.Warn("checkBundleAllBatchReady UpdateBatchProofsStatusByBundleHash failure", "error", checkErr, "hash", bundle.Hash)
}
}
if len(bundles) < pageSize {
break
}
page++
}
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopBundleAllBatchReadyChan:
log.Info("the coordinator checkBundleAllBatchReady run loop exit")
return
}
}
}
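The loop above pages through unready bundles with plain offset pagination; below is a minimal standalone sketch of that pattern, with an in-memory slice standing in for the ORM query.

package main

import "fmt"

func main() {
	// 123 fake rows standing in for bundleOrm query results.
	rows := make([]int, 123)
	page, pageSize := 1, 50
	for {
		offset := (page - 1) * pageSize
		end := offset + pageSize
		if end > len(rows) {
			end = len(rows)
		}
		batch := rows[offset:end]
		fmt.Printf("page %d: %d rows\n", page, len(batch))
		// A short page means the last page, same as the loop above.
		if len(batch) < pageSize {
			break
		}
		page++
	}
}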

View File

@@ -1,62 +1,21 @@
package auth
import (
"errors"
"fmt"
"strings"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/internal/types"
)
// LoginLogic the auth logic
type LoginLogic struct {
cfg *config.Config
challengeOrm *orm.Challenge
chunkVks map[string]struct{}
batchVKs map[string]struct{}
bundleVks map[string]struct{}
openVmVks map[string]struct{}
proverVersionHardForkMap map[string][]string
}
// NewLoginLogic creates a LoginLogic
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic {
proverVersionHardForkMap := make(map[string][]string)
if version.CheckScrollRepoVersion(cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
log.Error("config file error, low verifier min_prover_version should not more than high verifier min_prover_version",
"low verifier min_prover_version", cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion,
"high verifier min_prover_version", cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion)
panic("verifier config file error")
}
var highHardForks []string
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
if cfg.ProverManager.Verifier.HighVersionCircuit.ForkName != "euclid" {
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.LowVersionCircuit.ForkName)
}
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
proverVersionHardForkMap[cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion] = []string{cfg.ProverManager.Verifier.LowVersionCircuit.ForkName}
func NewLoginLogic(db *gorm.DB) *LoginLogic {
return &LoginLogic{
cfg: cfg,
chunkVks: vf.ChunkVKMap,
batchVKs: vf.BatchVKMap,
bundleVks: vf.BundleVkMap,
openVmVks: vf.OpenVMVkMap,
challengeOrm: orm.NewChallenge(db),
proverVersionHardForkMap: proverVersionHardForkMap,
challengeOrm: orm.NewChallenge(db),
}
}
@@ -64,82 +23,3 @@ func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *Logi
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
}
func (l *LoginLogic) Check(login *types.LoginParameter) error {
verify, err := login.Verify()
if err != nil || !verify {
log.Error("auth message verify failure", "prover_name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
return errors.New("auth message verify failure")
}
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, login.Message.ProverVersion)
}
if len(login.Message.ProverTypes) > 0 {
vks := make(map[string]struct{})
for _, proverType := range login.Message.ProverTypes {
switch proverType {
case types.ProverTypeChunk:
for vk := range l.chunkVks {
vks[vk] = struct{}{}
}
case types.ProverTypeBatch:
for vk := range l.batchVKs {
vks[vk] = struct{}{}
}
for vk := range l.bundleVks {
vks[vk] = struct{}{}
}
case types.ProverTypeOpenVM:
for vk := range l.openVmVks {
vks[vk] = struct{}{}
}
default:
log.Error("invalid prover_type", "value", proverType, "prover name", login.Message.ProverName, "prover_version", login.Message.ProverVersion)
}
}
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// the prover reports the same version, so the inconsistency must come from the vk itself
return errors.New("incompatible vk. please check your params files or config files")
}
}
}
if login.Message.ProverProviderType != types.ProverProviderTypeInternal && login.Message.ProverProviderType != types.ProverProviderTypeExternal {
// for backward compatibility, set ProverProviderType as internal
if login.Message.ProverProviderType == types.ProverProviderTypeUndefined {
login.Message.ProverProviderType = types.ProverProviderTypeInternal
} else {
log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
return errors.New("invalid prover provider type.")
}
}
return nil
}
// ProverHardForkName retrieves the hard fork names that the prover's version maps to
func (l *LoginLogic) ProverHardForkName(login *types.LoginParameter) (string, error) {
proverVersionSplits := strings.Split(login.Message.ProverVersion, "-")
if len(proverVersionSplits) == 0 {
return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
}
proverVersion := proverVersionSplits[0]
if hardForkNames, ok := l.proverVersionHardForkMap[proverVersion]; ok {
return strings.Join(hardForkNames, ","), nil
}
return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
}
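A small sketch of ProverHardForkName's lookup, with a hypothetical proverVersionHardForkMap (real entries come from the verifier config); everything after the first '-' in the reported version (commit metadata) is ignored.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical entries; the real map is built from the verifier config.
	proverVersionHardForkMap := map[string][]string{
		"v4.4.45": {"darwin", "curie"},
		"v4.3.0":  {"curie"},
	}
	full := "v4.4.45-aaaaaaa-bbbbbbb" // version tag plus commit metadata
	base := strings.Split(full, "-")[0]
	if names, ok := proverVersionHardForkMap[base]; ok {
		fmt.Println(strings.Join(names, ",")) // darwin,curie
	} else {
		fmt.Println("invalid prover_version:", full)
	}
}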

View File

@@ -4,17 +4,18 @@ import (
"context"
"encoding/json"
"fmt"
"math"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/forks"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
@@ -22,30 +23,39 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
cutils "scroll-tech/coordinator/internal/utils"
)
// BatchProverTask is the prover task implementation for batch proof
type BatchProverTask struct {
BaseProverTask
batchTaskGetTaskTotal *prometheus.CounterVec
batchTaskGetTaskProver *prometheus.CounterVec
batchAttemptsExceedTotal prometheus.Counter
batchTaskGetTaskTotal *prometheus.CounterVec
batchTaskGetTaskProver *prometheus.CounterVec
}
// NewBatchProverTask creates a batch collector
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
vkMap: vkMap,
reverseVkMap: reverseMap(vkMap),
db: db,
cfg: cfg,
chainCfg: chainCfg,
blockOrm: orm.NewL2Block(db),
nameForkMap: nameForkMap,
forkHeights: forkHeights,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
proverBlockListOrm: orm.NewProverBlockList(db),
},
batchAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_attempts_exceed_total",
Help: "Total number of batch attempts exceed.",
}),
batchTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_batch_get_task_total",
Help: "Total number of batch get task.",
@@ -55,33 +65,38 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
return bp
}
// Assign loads and assigns batch tasks
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
type chunkIndexRange struct {
start uint64
end uint64
}
func (r *chunkIndexRange) merge(o chunkIndexRange) *chunkIndexRange {
var start, end = r.start, r.end
if o.start < r.start {
start = o.start
}
if o.end > r.end {
end = o.end
}
return &chunkIndexRange{start, end}
}
func (r *chunkIndexRange) contains(start, end uint64) bool {
return r.start <= start && r.end > end
}
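A standalone check of the two range helpers above: merge widens to the union-spanning bounds, and contains treats the upper bound as exclusive.

package main

import "fmt"

type chunkIndexRange struct{ start, end uint64 }

// merge widens the receiver to span both ranges, as in the method above.
func (r chunkIndexRange) merge(o chunkIndexRange) chunkIndexRange {
	if o.start < r.start {
		r.start = o.start
	}
	if o.end > r.end {
		r.end = o.end
	}
	return r
}

// contains mirrors the check above: start may touch the lower bound,
// end must stay strictly below the upper bound.
func (r chunkIndexRange) contains(start, end uint64) bool {
	return r.start <= start && r.end > end
}

func main() {
	m := chunkIndexRange{10, 20}.merge(chunkIndexRange{18, 30})
	fmt.Println(m)                  // {10 30}
	fmt.Println(m.contains(10, 29)) // true
	fmt.Println(m.contains(10, 30)) // false: end is exclusive
}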
type getHardForkNameByBatchFunc func(*orm.Batch) (string, error)
func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCtx *proverTaskContext,
chunkRange *chunkIndexRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByBatchFunc) (*coordinatorType.GetTaskSchema, error) {
startChunkIndex, endChunkIndex := chunkRange.start, chunkRange.end
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
unassignedBatchCount, getCountError := bp.batchOrm.GetUnassignedBatchCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getCountError != nil {
log.Error("failed to get unassigned batch proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
return nil, ErrCoordinatorInternalFailure
}
// Assign external prover if unassigned task number exceeds threshold
if unassignedBatchCount < bp.cfg.ProverManager.ExternalProverThreshold {
return nil, nil
}
}
var batchTask *orm.Batch
var hardForkName string
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBatchTask *orm.Batch
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -90,7 +105,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why fetch again here? To let one task be assigned to multiple provers, we must also hand out batches already in
// `ProvingTaskAssigned`. But a single query with `proving_status in (1, 2)` would not use the postgres index, so the SQL is split in two.
if tmpBatchTask == nil {
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -102,30 +117,6 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
taskCtx.taskType = message.ProofTypeBatch
taskCtx.batchTask = tmpBatchTask
var checkErr error
hardForkName, checkErr = bp.hardForkSanityCheck(ctx, taskCtx)
if checkErr != nil {
log.Debug("hard fork sanity check failed", "height", getTaskParameter.ProverHeight, "err", checkErr)
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
@@ -147,15 +138,28 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
var (
proverVersion = taskCtx.ProverVersion
hardForkName = taskCtx.HardForkName
)
var err error
if getHardForkName != nil {
hardForkName, err = getHardForkName(batchTask)
if err != nil {
log.Error("failed to get hard fork name by batch", "task_id", batchTask.Hash, "error", err.Error())
return nil, ErrCoordinatorInternalFailure
}
}
proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProverVersion: proverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
@@ -166,7 +170,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, batchTask, hardForkName)
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
@@ -183,7 +187,115 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil
}
func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, batch *orm.Batch, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
func (bp *BatchProverTask) getChunkRangeByName(ctx *gin.Context, hardForkName string) (*chunkIndexRange, error) {
hardForkNumber, err := bp.getHardForkNumberByName(hardForkName)
if err != nil {
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", hardForkName)
return nil, err
}
// if the hard fork number is set, the rollup relayer must generate chunks starting from the hard fork block,
// so the first chunk of the hard fork has start_block_number equal to ForkBlockNumber
var startChunkIndex uint64 = 0
var endChunkIndex uint64 = math.MaxInt64
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", hardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
return nil, nil
}
startChunkIndex = startChunk.Index
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", hardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
// a nil toChunk only indicates that we haven't yet reached the fork boundary,
// so endChunkIndex can stay at math.MaxInt64
endChunkIndex = toChunk.Index
}
}
return &chunkIndexRange{startChunkIndex, endChunkIndex}, nil
}
func (bp *BatchProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
chunkRange, err := bp.getChunkRangeByName(ctx, taskCtx.HardForkName)
if err != nil {
return nil, err
}
if chunkRange == nil {
return nil, nil
}
return bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, nil)
}
func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
var (
hardForkNames [2]string
chunkRanges [2]*chunkIndexRange
err error
)
var chunkRange *chunkIndexRange
for i := 0; i < 2; i++ {
hardForkNames[i] = bp.reverseVkMap[getTaskParameter.VKs[i]]
chunkRanges[i], err = bp.getChunkRangeByName(ctx, hardForkNames[i])
if err != nil {
return nil, err
}
if chunkRanges[i] != nil {
if chunkRange == nil {
chunkRange = chunkRanges[i]
} else {
chunkRange = chunkRange.merge(*chunkRanges[i])
}
}
}
if chunkRange == nil {
return nil, nil
}
var hardForkName string
getHardForkName := func(batch *orm.Batch) (string, error) {
for i := 0; i < 2; i++ {
if chunkRanges[i] != nil && chunkRanges[i].contains(batch.StartChunkIndex, batch.EndChunkIndex) {
hardForkName = hardForkNames[i]
break
}
}
if hardForkName == "" {
log.Warn("get batch not belongs to any hard fork name", "batch id", batch.Index)
return "", fmt.Errorf("get batch not belongs to any hard fork name, batch id: %d", batch.Index)
}
return hardForkName, nil
}
schema, err := bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, getHardForkName)
if schema != nil && err == nil {
schema.HardForkName = hardForkName
return schema, nil
}
return schema, err
}
// Assign loads and assigns batch tasks
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
if len(getTaskParameter.VKs) > 0 {
return bp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
}
return bp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
}
func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// get chunk from db
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID)
if err != nil {
@@ -191,39 +303,32 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
return nil, err
}
if len(chunks) == 0 {
return nil, fmt.Errorf("no chunk found for batch task id:%s", task.TaskID)
}
var chunkProofs []message.ChunkProof
var chunkProofs []*message.ChunkProof
var chunkInfos []*message.ChunkInfo
for _, chunk := range chunks {
proof := message.NewChunkProof(hardForkName)
var proof message.ChunkProof
if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, task.TaskID, chunk.Hash)
}
chunkProofs = append(chunkProofs, proof)
chunkProofs = append(chunkProofs, &proof)
chunkInfo := message.ChunkInfo{
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
PostStateRoot: common.HexToHash(chunk.StateRoot),
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
DataHash: common.HexToHash(chunk.Hash),
PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
IsPadding: false,
ChainID: bp.cfg.L2.ChainID,
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
PostStateRoot: common.HexToHash(chunk.StateRoot),
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
DataHash: common.HexToHash(chunk.Hash),
IsPadding: false,
}
if haloProof, ok := proof.(*message.Halo2ChunkProof); ok {
if haloProof.ChunkInfo != nil {
chunkInfo.TxBytes = haloProof.ChunkInfo.TxBytes
}
if proof.ChunkInfo != nil {
chunkInfo.TxBytes = proof.ChunkInfo.TxBytes
}
chunkInfos = append(chunkInfos, &chunkInfo)
}
taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs)
if err != nil {
return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", task.TaskID, err)
taskDetail := message.BatchTaskDetail{
ChunkInfos: chunkInfos,
ChunkProofs: chunkProofs,
}
chunkProofsBytes, err := json.Marshal(taskDetail)
@@ -232,56 +337,16 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
HardForkName: hardForkName,
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes),
}
return taskMsg, nil
}
func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *orm.Batch) {
if err := bp.batchOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err)
}
}
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []message.ChunkProof) (*message.BatchTaskDetail, error) {
taskDetail := &message.BatchTaskDetail{
ChunkInfos: chunkInfos,
ChunkProofs: chunkProofs,
}
dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
switch dbBatchCodecVersion {
case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6, encoding.CodecV7:
default:
return taskDetail, nil
}
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
if err != nil {
return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
}
batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
}
taskDetail.BatchHeader = batchHeader
taskDetail.BlobBytes = dbBatch.BlobBytes
if len(dbBatch.BlobDataProof) < 160 {
return nil, fmt.Errorf("blob data proof length is less than 160 bytes = %d, taskID: %s: %s", len(dbBatch.BlobDataProof), dbBatch.Hash, common.Bytes2Hex(dbBatch.BlobDataProof))
}
// Memory layout of `BlobDataProof`: used in Codec.BlobDataProofForPointEvaluation()
// | z | y | kzg_commitment | kzg_proof |
// |---------|---------|----------------|-----------|
// | bytes32 | bytes32 | bytes48 | bytes48 |
taskDetail.KzgProof = dbBatch.BlobDataProof[112:160]
taskDetail.KzgCommitment = dbBatch.BlobDataProof[64:112]
taskDetail.Challenge = common.Hash(dbBatch.BlobDataProof[0:32])
return taskDetail, nil
}
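Given the 160-byte layout documented above, the slicing reduces to fixed offsets; a quick sketch with a dummy buffer in place of a real BlobDataProof.

package main

import "fmt"

func main() {
	// Dummy 160-byte proof; layout per the comment above:
	// | z: bytes32 | y: bytes32 | kzg_commitment: bytes48 | kzg_proof: bytes48 |
	blobDataProof := make([]byte, 160)
	if len(blobDataProof) < 160 {
		panic("blob data proof is less than 160 bytes")
	}
	challenge := blobDataProof[0:32]       // z
	y := blobDataProof[32:64]              // y (not used by the task detail)
	kzgCommitment := blobDataProof[64:112] // bytes48
	kzgProof := blobDataProof[112:160]     // bytes48
	fmt.Println(len(challenge), len(y), len(kzgCommitment), len(kzgProof)) // 32 32 48 48
}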

View File

@@ -1,229 +0,0 @@
package provertask
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
cutils "scroll-tech/coordinator/internal/utils"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)
// BundleProverTask is the prover task implementation for bundle proof
type BundleProverTask struct {
BaseProverTask
bundleTaskGetTaskTotal *prometheus.CounterVec
bundleTaskGetTaskProver *prometheus.CounterVec
}
// NewBundleProverTask new a bundle collector
func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProverTask {
bp := &BundleProverTask{
BaseProverTask: BaseProverTask{
db: db,
chainCfg: chainCfg,
cfg: cfg,
blockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
proverTaskOrm: orm.NewProverTask(db),
proverBlockListOrm: orm.NewProverBlockList(db),
},
bundleTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_bundle_get_task_total",
Help: "Total number of bundle get task.",
}, []string{"fork_name"}),
bundleTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "bundle"),
}
return bp
}
// Assign loads and assigns bundle tasks
func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
unassignedBundleCount, getCountError := bp.bundleOrm.GetUnassignedBundleCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getCountError != nil {
log.Error("failed to get unassigned bundle proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
return nil, ErrCoordinatorInternalFailure
}
// Assign external prover if unassigned task number exceeds threshold
if unassignedBundleCount < bp.cfg.ProverManager.ExternalProverThreshold {
return nil, nil
}
}
var bundleTask *orm.Bundle
var hardForkName string
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBundleTask *orm.Bundle
tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
// Why fetch again here? To let one task be assigned to multiple provers, we must also hand out bundles already in
// `ProvingTaskAssigned`. But a single query with `proving_status in (1, 2)` would not use the postgres index, so the SQL is split in two.
if tmpBundleTask == nil {
tmpBundleTask, getTaskError = bp.bundleOrm.GetUnassignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}
if tmpBundleTask == nil {
log.Debug("get empty bundle", "height", getTaskParameter.ProverHeight)
return nil, nil
}
taskCtx.taskType = message.ProofTypeBundle
taskCtx.bundleTask = tmpBundleTask
var checkErr error
hardForkName, checkErr = bp.hardForkSanityCheck(ctx, taskCtx)
if checkErr != nil {
log.Debug("hard fork sanity check failed", "height", getTaskParameter.ProverHeight, "err", checkErr)
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
if getTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
bundleTask = tmpBundleTask
break
}
if bundleTask == nil {
log.Debug("get empty unassigned bundle after retry 5 times", "height", getTaskParameter.ProverHeight)
return nil, nil
}
log.Info("start bundle proof generation session", "task index", bundleTask.Index, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
proverTask := orm.ProverTask{
TaskID: bundleTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBundle),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("format bundle prover task failure", "task_id", bundleTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
bp.bundleTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.bundleTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
}).Inc()
return taskMsg, nil
}
func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
// get bundle from db
batches, err := bp.batchOrm.GetBatchesByBundleHash(ctx, task.TaskID)
if err != nil {
err = fmt.Errorf("failed to get batch proofs for batch task id:%s err:%w ", task.TaskID, err)
return nil, err
}
if len(batches) == 0 {
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
}
var batchProofs []message.BatchProof
for _, batch := range batches {
proof := message.NewBatchProof(hardForkName)
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, task.TaskID, batch.Hash)
}
batchProofs = append(batchProofs, proof)
}
taskDetail := message.BundleTaskDetail{
BatchProofs: batchProofs,
}
batchProofsBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", task.TaskID, err)
}
taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBundle),
TaskData: string(batchProofsBytes),
HardForkName: hardForkName,
}
return taskMsg, nil
}
func (bp *BundleProverTask) recoverActiveAttempts(ctx *gin.Context, bundleTask *orm.Bundle) {
if err := bp.bundleOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), bundleTask.Hash); err != nil {
log.Error("failed to recover bundle active attempts", "hash", bundleTask.Hash, "error", err)
}
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/forks"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
@@ -20,29 +21,38 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
cutils "scroll-tech/coordinator/internal/utils"
)
// ChunkProverTask the chunk prover task
type ChunkProverTask struct {
BaseProverTask
chunkTaskGetTaskTotal *prometheus.CounterVec
chunkTaskGetTaskProver *prometheus.CounterVec
chunkAttemptsExceedTotal prometheus.Counter
chunkTaskGetTaskTotal *prometheus.CounterVec
chunkTaskGetTaskProver *prometheus.CounterVec
}
// NewChunkProverTask new a chunk prover task
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
vkMap: vkMap,
reverseVkMap: reverseMap(vkMap),
db: db,
cfg: cfg,
chainCfg: chainCfg,
nameForkMap: nameForkMap,
forkHeights: forkHeights,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
proverBlockListOrm: orm.NewProverBlockList(db),
},
chunkAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_chunk_attempts_exceed_total",
Help: "Total number of chunk attempts exceed.",
}),
chunkTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_chunk_get_task_total",
Help: "Total number of chunk get task.",
@@ -52,33 +62,22 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
return cp
}
// Assign the chunk proof which needs to be proven
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := cp.checkParameter(ctx)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
type getHardForkNameByChunkFunc func(*orm.Chunk) (string, error)
func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCtx *proverTaskContext,
blockRange *blockRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByChunkFunc) (*coordinatorType.GetTaskSchema, error) {
fromBlockNum, toBlockNum := blockRange.from, blockRange.to
if toBlockNum > getTaskParameter.ProverHeight {
toBlockNum = getTaskParameter.ProverHeight + 1
}
maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := cp.cfg.ProverManager.SessionAttempts
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
unassignedChunkCount, getCountError := cp.chunkOrm.GetUnassignedChunkCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
if getCountError != nil {
log.Error("failed to get unassigned chunk proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
return nil, ErrCoordinatorInternalFailure
}
// Assign external prover if unassigned task number exceeds threshold
if unassignedChunkCount < cp.cfg.ProverManager.ExternalProverThreshold {
return nil, nil
}
}
var chunkTask *orm.Chunk
var hardForkName string
for i := 0; i < 5; i++ {
var getTaskError error
var tmpChunkTask *orm.Chunk
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -87,7 +86,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why fetch again here? To let one task be assigned to multiple provers, we must also hand out chunks already in
// `ProvingTaskAssigned`. But a single query with `proving_status in (1, 2)` would not use the postgres index, so the SQL is split in two.
if tmpChunkTask == nil {
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
@@ -99,30 +98,6 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
taskCtx.taskType = message.ProofTypeChunk
taskCtx.chunkTask = tmpChunkTask
var checkErr error
hardForkName, checkErr = cp.hardForkSanityCheck(ctx, taskCtx)
if checkErr != nil {
log.Debug("hard fork sanity check failed", "height", getTaskParameter.ProverHeight, "err", checkErr)
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getFailedTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
if getFailedTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getFailedTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
@@ -144,15 +119,28 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
var (
proverVersion = taskCtx.ProverVersion
hardForkName = taskCtx.HardForkName
err error
)
if getHardForkName != nil {
hardForkName, err = getHardForkName(chunkTask)
if err != nil {
log.Error("failed to get hard fork name by chunk", "task_id", chunkTask.Hash, "error", err.Error())
return nil, ErrCoordinatorInternalFailure
}
}
proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProverVersion: proverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
@@ -162,7 +150,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
@@ -179,7 +167,96 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil
}
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
func (cp *ChunkProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
blockRange, err := cp.getBlockRangeByName(taskCtx.HardForkName)
if err != nil {
return nil, err
}
return cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, nil)
}
func (cp *ChunkProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
var (
hardForkNames [2]string
blockRanges [2]*blockRange
err error
)
for i := 0; i < 2; i++ {
hardForkNames[i] = cp.reverseVkMap[getTaskParameter.VKs[i]]
blockRanges[i], err = cp.getBlockRangeByName(hardForkNames[i])
if err != nil {
return nil, err
}
}
blockRange, err := blockRanges[0].merge(*blockRanges[1])
if err != nil {
return nil, err
}
var hardForkName string
getHardForkName := func(chunk *orm.Chunk) (string, error) {
for i := 0; i < 2; i++ {
if blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) {
hardForkName = hardForkNames[i]
break
}
}
if hardForkName == "" {
log.Warn("get chunk not belongs to any hard fork name", "chunk id", chunk.Index)
return "", fmt.Errorf("get chunk not belongs to any hard fork name, chunk id: %d", chunk.Index)
}
return hardForkName, nil
}
schema, err := cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, getHardForkName)
if schema != nil && err == nil {
schema.HardForkName = hardForkName
return schema, nil
}
return schema, err
}
type blockRange struct {
from uint64
to uint64
}
func (r *blockRange) merge(o blockRange) (*blockRange, error) {
if r.from == o.to {
return &blockRange{o.from, r.to}, nil
} else if r.to == o.from {
return &blockRange{r.from, o.to}, nil
}
return nil, fmt.Errorf("two ranges are not adjacent")
}
func (r *blockRange) contains(start, end uint64) bool {
return r.from <= start && r.to > end
}
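Unlike the batch side's chunkIndexRange, blockRange.merge only accepts adjacent half-open ranges; a quick standalone check of that behavior (blockRangeDemo is a local stand-in for the repo type).

package main

import "fmt"

type blockRangeDemo struct{ from, to uint64 }

// merge succeeds only when the half-open ranges share a boundary,
// mirroring the adjacency requirement above.
func (r blockRangeDemo) merge(o blockRangeDemo) (blockRangeDemo, error) {
	switch {
	case r.from == o.to:
		return blockRangeDemo{o.from, r.to}, nil
	case r.to == o.from:
		return blockRangeDemo{r.from, o.to}, nil
	default:
		return blockRangeDemo{}, fmt.Errorf("two ranges are not adjacent")
	}
}

func main() {
	low := blockRangeDemo{0, 100}
	high := blockRangeDemo{100, 200}
	merged, err := low.merge(high)
	fmt.Println(merged, err) // {0 200} <nil>
	_, err = low.merge(blockRangeDemo{150, 200})
	fmt.Println(err) // two ranges are not adjacent
}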
func (cp *ChunkProverTask) getBlockRangeByName(hardForkName string) (*blockRange, error) {
hardForkNumber, err := cp.getHardForkNumberByName(hardForkName)
if err != nil {
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", hardForkName)
return nil, err
}
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
return &blockRange{fromBlockNum, toBlockNum}, nil
}
// Assign the chunk proof which needs to be proven
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
if len(getTaskParameter.VKs) > 0 {
return cp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
}
return cp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
}
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
if dbErr != nil || len(blockHashes) == 0 {
@@ -195,11 +272,10 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
proverTaskSchema := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeChunk),
TaskData: string(blockHashesBytes),
HardForkName: hardForkName,
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeChunk),
TaskData: string(blockHashesBytes),
}
return proverTaskSchema, nil

View File

@@ -1,19 +1,16 @@
package provertask
import (
"errors"
"fmt"
"strings"
"sync"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/params"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
@@ -22,12 +19,9 @@ import (
var (
// ErrCoordinatorInternalFailure coordinator internal db failure
ErrCoordinatorInternalFailure = errors.New("coordinator internal error")
)
var (
getTaskCounterInitOnce sync.Once
getTaskCounterVec *prometheus.CounterVec = nil
ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ErrHardForkName indicates a client request with a wrong hard fork name
ErrHardForkName = fmt.Errorf("wrong hard fork name")
)
// ProverTask the interface of a collector who sends data to provers
@@ -35,135 +29,100 @@ type ProverTask interface {
Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
}
func reverseMap(input map[string]string) map[string]string {
output := make(map[string]string, len(input))
for k, v := range input {
if k != "" {
output[v] = k
}
}
return output
}
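// Illustrative sketch (not part of the diff): reverseMap turns the
// hardForkName -> vk map into a vk -> hardForkName lookup, skipping entries
// whose fork name is empty.
func reverseMapExample() {
	vkMap := map[string]string{"bernoulli": "vk-b64-1", "curie": "vk-b64-2"}
	reversed := reverseMap(vkMap)
	fmt.Println(reversed["vk-b64-2"]) // prints: curie
}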
// BaseProverTask is a base prover task that contains a series of shared functions
type BaseProverTask struct {
cfg *config.Config
chainCfg *params.ChainConfig
db *gorm.DB
cfg *config.Config
db *gorm.DB
// key is hardForkName, value is vk
vkMap map[string]string
// key is vk, value is hardForkName
reverseVkMap map[string]string
nameForkMap map[string]uint64
forkHeights []uint64
batchOrm *orm.Batch
chunkOrm *orm.Chunk
bundleOrm *orm.Bundle
blockOrm *orm.L2Block
proverTaskOrm *orm.ProverTask
proverBlockListOrm *orm.ProverBlockList
}
type proverTaskContext struct {
PublicKey string
ProverName string
ProverVersion string
ProverProviderType uint8
HardForkNames map[string]struct{}
taskType message.ProofType
chunkTask *orm.Chunk
batchTask *orm.Batch
bundleTask *orm.Bundle
}
// hardForkName gets the chunk/batch/bundle hard fork name
func (b *BaseProverTask) hardForkName(ctx *gin.Context, taskCtx *proverTaskContext) (string, error) {
switch {
case taskCtx.taskType == message.ProofTypeChunk:
if taskCtx.chunkTask == nil {
return "", errors.New("chunk task is nil")
}
l2Block, getBlockErr := b.blockOrm.GetL2BlockByNumber(ctx.Copy(), taskCtx.chunkTask.StartBlockNumber)
if getBlockErr != nil {
return "", getBlockErr
}
hardForkName := encoding.GetHardforkName(b.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
return hardForkName, nil
case taskCtx.taskType == message.ProofTypeBatch:
if taskCtx.batchTask == nil {
return "", errors.New("batch task is nil")
}
startChunk, getChunkErr := b.chunkOrm.GetChunkByHash(ctx, taskCtx.batchTask.StartChunkHash)
if getChunkErr != nil {
return "", getChunkErr
}
l2Block, getBlockErr := b.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
if getBlockErr != nil {
return "", getBlockErr
}
hardForkName := encoding.GetHardforkName(b.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
return hardForkName, nil
case taskCtx.taskType == message.ProofTypeBundle:
if taskCtx.bundleTask == nil {
return "", errors.New("bundle task is nil")
}
startBatch, getBatchErr := b.batchOrm.GetBatchByHash(ctx, taskCtx.bundleTask.StartBatchHash)
if getBatchErr != nil {
return "", getBatchErr
}
startChunk, getChunkErr := b.chunkOrm.GetChunkByHash(ctx, startBatch.StartChunkHash)
if getChunkErr != nil {
return "", getChunkErr
}
l2Block, getBlockErr := b.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
if getBlockErr != nil {
return "", getBlockErr
}
hardForkName := encoding.GetHardforkName(b.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
return hardForkName, nil
default:
return "", errors.New("illegal task type")
}
}
// hardForkSanityCheck checks that the task's hard fork name matches one supported by the prover
func (b *BaseProverTask) hardForkSanityCheck(ctx *gin.Context, taskCtx *proverTaskContext) (string, error) {
hardForkName, getHardForkErr := b.hardForkName(ctx, taskCtx)
if getHardForkErr != nil {
return "", getHardForkErr
}
if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
return "", errors.New("to be assigned prover task's hard-fork name is not the same as prover")
}
return hardForkName, nil
PublicKey string
ProverName string
ProverVersion string
HardForkName string
}
// checkParameter validates the prover task parameters
func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, error) {
func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*proverTaskContext, error) {
var ptc proverTaskContext
ptc.HardForkNames = make(map[string]struct{})
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, errors.New("get public key from context failed")
return nil, fmt.Errorf("get public key from context failed")
}
ptc.PublicKey = publicKey.(string)
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, errors.New("get prover name from context failed")
return nil, fmt.Errorf("get prover name from context failed")
}
ptc.ProverName = proverName.(string)
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, errors.New("get prover version from context failed")
return nil, fmt.Errorf("get prover version from context failed")
}
ptc.ProverVersion = proverVersion.(string)
ProverProviderType, ProverProviderTypeExist := ctx.Get(coordinatorType.ProverProviderTypeKey)
if !ProverProviderTypeExist {
// for backward compatibility, set ProverProviderType as internal
ProverProviderType = float64(coordinatorType.ProverProviderTypeInternal)
if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
}
ptc.ProverProviderType = uint8(ProverProviderType.(float64))
hardForkNamesStr, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, errors.New("get hard fork name from context failed")
}
hardForkNames := strings.Split(hardForkNamesStr.(string), ",")
for _, hardForkName := range hardForkNames {
ptc.HardForkNames[hardForkName] = struct{}{}
// a non-empty VKs list signals a multi-circuit prover version
if len(getTaskParameter.VKs) > 0 {
if len(getTaskParameter.VKs) != 2 {
return nil, fmt.Errorf("parameter vks length must be 2")
}
for _, vk := range getTaskParameter.VKs {
if _, exists := b.reverseVkMap[vk]; !exists {
return nil, fmt.Errorf("incompatible vk. vk %s is invalid", vk)
}
}
} else {
hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, fmt.Errorf("get hard fork name from context failed")
}
ptc.HardForkName = hardForkName.(string)
vk, vkExist := b.vkMap[ptc.HardForkName]
if !vkExist {
return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
}
// if the prover has a different vk
if getTaskParameter.VK != vk {
log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
// if the prover reports a different prover version
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
// if the prover reports the same prover version
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
}
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
@@ -185,6 +144,26 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
return &ptc, nil
}
func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error) {
// During the first hard fork upgrade, provers did not pass fork_name to the coordinator,
// so the coordinator needs to remain compatible.
if forkName == "" {
return 0, nil
}
hardForkNumber, exist := b.nameForkMap[forkName]
if !exist {
return 0, ErrHardForkName
}
return hardForkNumber, nil
}
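// Illustrative sketch (not part of the diff): fork-name resolution including
// the backward-compatibility path. An old prover that sends no fork_name
// resolves to fork number 0; an unknown name yields ErrHardForkName.
func resolveForkSketch(nameForkMap map[string]uint64, forkName string) (uint64, error) {
	if forkName == "" {
		return 0, nil // pre-upgrade prover: no fork name supplied
	}
	if number, ok := nameForkMap[forkName]; ok {
		return number, nil
	}
	return 0, ErrHardForkName
}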
var (
getTaskCounterInitOnce sync.Once
getTaskCounterVec *prometheus.CounterVec
)
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
getTaskCounterInitOnce.Do(func() {
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{

View File

@@ -4,15 +4,14 @@ import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/types"
@@ -36,26 +35,21 @@ var (
// ErrValidatorFailureTaskHaveVerifiedSuccess indicates the chunk/batch has already been proved and verified successfully
ErrValidatorFailureTaskHaveVerifiedSuccess = errors.New("validator failure: chunk/batch already proved and verified successfully")
// ErrValidatorFailureVerifiedFailed indicates verification failed and the verifier returned an error
ErrValidatorFailureVerifiedFailed = errors.New("verification failed, verifier returns error")
ErrValidatorFailureVerifiedFailed = fmt.Errorf("verification failed, verifier returns error")
// ErrValidatorSuccessInvalidProof indicates verification succeeded but the proof is invalid
ErrValidatorSuccessInvalidProof = errors.New("verification succeeded, it's an invalid proof")
// ErrGetHardForkNameFailed failed to get hard fork name
ErrGetHardForkNameFailed = errors.New("failed to get hard fork name")
ErrValidatorSuccessInvalidProof = fmt.Errorf("verification succeeded, it's an invalid proof")
// ErrCoordinatorInternalFailure indicates a coordinator internal db failure
ErrCoordinatorInternalFailure = errors.New("coordinator internal error")
ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
)
// ProofReceiverLogic is the proof receiver logic
type ProofReceiverLogic struct {
chunkOrm *orm.Chunk
batchOrm *orm.Batch
bundleOrm *orm.Bundle
blockOrm *orm.L2Block
proverTaskOrm *orm.ProverTask
db *gorm.DB
cfg *config.ProverManager
chainCfg *params.ChainConfig
db *gorm.DB
cfg *config.ProverManager
verifier *verifier.Verifier
@@ -72,17 +66,14 @@ type ProofReceiverLogic struct {
}
// NewSubmitProofReceiverLogic creates a proof receiver logic
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic {
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic {
return &ProofReceiverLogic{
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
cfg: cfg,
chainCfg: chainCfg,
db: db,
cfg: cfg,
db: db,
verifier: vf,
@@ -133,30 +124,47 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.Cha
// HandleZkProof handles a ZkProof submitted by a prover.
// For now, only proving/verifying errors lead to the status being set as skipped;
// db/unmarshal errors do not, because they are errors on the business logic side.
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) error {
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) error {
m.proofReceivedTotal.Inc()
pk := ctx.GetString(coordinatorType.PublicKey)
if len(pk) == 0 {
return errors.New("get public key from context failed")
return fmt.Errorf("get public key from context failed")
}
pv := ctx.GetString(coordinatorType.ProverVersion)
if len(pv) == 0 {
return errors.New("get ProverVersion from context failed")
return fmt.Errorf("get ProverVersion from context failed")
}
// Use hard_fork_name from the parameter first;
// if the prover supports multiple hard forks, the real hard_fork_name is not set in the gin context.
hardForkName := proofParameter.HardForkName
if hardForkName == "" {
hardForkName = ctx.GetString(coordinatorType.HardForkName)
}
proverTask, err := m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofParameter.TaskID, "error", err)
return ErrValidatorFailureProverTaskEmpty
var proverTask *orm.ProverTask
var err error
if proofParameter.UUID != "" {
proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
} else {
// TODO: once all provers have upgraded, delete this logic
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx.Copy(), proofMsg.Type, proofMsg.ID, pk, pv)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proofID", proofParameter.TaskID, "proverName", proverTask.ProverName,
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)
if err = m.validator(ctx.Copy(), proverTask, pk, proofParameter); err != nil {
if err = m.validator(ctx.Copy(), proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
return err
}
@@ -164,39 +172,18 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
success := true
var verifyErr error
hardForkName, getHardForkErr := m.hardForkName(ctx, proofParameter.TaskID, proofParameter.TaskType)
if getHardForkErr != nil {
return ErrGetHardForkNameFailed
}
switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:
chunkProof := message.NewChunkProof(hardForkName)
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &chunkProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyChunkProof(chunkProof, hardForkName)
case message.ProofTypeBatch:
batchProof := message.NewBatchProof(hardForkName)
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyBatchProof(batchProof, hardForkName)
case message.ProofTypeBundle:
bundleProof := message.NewBundleProof(hardForkName)
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyBundleProof(bundleProof, hardForkName)
// Only verify batch proofs; the chunk proof verifier has been disabled since Bernoulli.
if proofMsg.Type == message.ProofTypeBatch {
success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName)
}
if verifyErr != nil || !success {
m.verifierFailureTotal.WithLabelValues(pv).Inc()
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofParameter)
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
log.Info("proof verified by coordinator failed", "proof id", proofParameter.TaskID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofParameter.TaskType, "proof time", proofTimeSec, "error", verifyErr)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
if verifyErr != nil {
return ErrValidatorFailureVerifiedFailed
@@ -206,13 +193,13 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())
log.Info("proof verified and valid", "proof id", proofParameter.TaskID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofParameter.TaskType, "proof time", proofTimeSec)
log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)
if err := m.closeProofTask(ctx.Copy(), proverTask, proofParameter, proofTimeSec); err != nil {
if err := m.closeProofTask(ctx.Copy(), proverTask, proofMsg, proofTimeSec); err != nil {
m.proofSubmitFailure.Inc()
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofParameter)
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
return ErrCoordinatorInternalFailure
}
@@ -225,6 +212,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
if err != nil {
return err
}
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batch.BatchHash)
if err != nil {
return err
@@ -238,7 +226,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
return nil
}
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofParameter coordinatorType.SubmitProofParameter) (err error) {
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) {
defer func() {
if err != nil {
m.validateFailureTotal.Inc()
@@ -255,9 +243,9 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
// (ii) set the maximum failure retry times
log.Warn(
"cannot submit valid proof for a prover task twice",
"taskType", proverTask.TaskType, "hash", proofParameter.TaskID,
"taskType", proverTask.TaskType, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", proverTask.ProverPublicKey,
"proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName,
)
return ErrValidatorFailureProverTaskCannotSubmitTwice
}
@@ -265,60 +253,59 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
if proofParameter.Status != int(coordinatorType.StatusOk) {
if proofMsg.Status != message.StatusOk {
// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeSubmitStatusNotOk, proofParameter)
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeSubmitStatusNotOk, proofMsg)
m.validateFailureProverTaskStatusNotOk.Inc()
log.Info("proof generated by prover failed",
"taskType", proofParameter.TaskType, "hash", proofParameter.TaskID, "proverName", proverTask.ProverName,
"taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
"failureMessage", failureMsg)
"failureMessage", failureMsg, "forkName", forkName)
return ErrValidatorFailureProofMsgStatusNotOk
}
// If the prover task's FailureType is ProverTaskFailureTypeTimeout, the submitted proof has timed out and needs to be skipped.
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName)
return ErrValidatorFailureProofTimeout
}
// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofParameter); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofParameter.TaskID, "proverPublicKey", pk,
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
}
// If the batch/chunk has already been proved and verified successfully, skip this submitted proof.
if m.checkIsTaskSuccess(ctx, proofParameter.TaskID, message.ProofType(proofParameter.TaskType)) {
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeObjectAlreadyVerified, proofParameter)
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
m.validateFailureProverTaskHaveVerifier.Inc()
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofParameter.TaskID,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil
}
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, failureType types.ProverTaskFailureType, proofParameter coordinatorType.SubmitProofParameter) {
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, failureType types.ProverTaskFailureType, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskUnassigned.String())
if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofInvalid, failureType, 0); err != nil {
if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProverProofInvalid, failureType, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", proverTask.TaskID, "pubKey", proverTask.ProverPublicKey, "error", err)
}
}
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm.ProverTask, proofParameter coordinatorType.SubmitProofParameter, proofTimeSec uint64) error {
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String())
if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil {
if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err)
return err
}
@@ -327,14 +314,14 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm
// updateProofStatus updates the chunk/batch task and session info status
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *orm.ProverTask,
proofParameter coordinatorType.SubmitProofParameter, status types.ProverProveStatus, failureType types.ProverTaskFailureType, proofTimeSec uint64) error {
proofMsg *message.ProofMsg, status types.ProverProveStatus, failureType types.ProverTaskFailureType, proofTimeSec uint64) error {
err := m.db.Transaction(func(tx *gorm.DB) error {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatusAndFailureType(ctx, proverTask.UUID, status, failureType, tx); updateErr != nil {
log.Error("failed to update prover task proving status and failure type", "uuid", proverTask.UUID, "error", updateErr)
return updateErr
}
switch message.ProofType(proofParameter.TaskType) {
switch proofMsg.Type {
case message.ProofTypeChunk:
if err := m.chunkOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "hash", proverTask.TaskID, "error", err)
@@ -345,28 +332,21 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *
log.Error("failed to update batch proving_status as failed", "hash", proverTask.TaskID, "error", err)
return err
}
case message.ProofTypeBundle:
if err := m.bundleOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil {
log.Error("failed to update bundle proving_status as failed", "hash", proverTask.TaskID, "error", err)
return err
}
}
// If the batch's proof has already been verified, do not let the failed status overwrite its proving status.
if m.checkIsTaskSuccess(ctx, proverTask.TaskID, message.ProofType(proofParameter.TaskType)) {
if m.checkIsTaskSuccess(ctx, proverTask.TaskID, proofMsg.Type) {
log.Info("update proof status skip because this chunk/batch has been verified", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey)
return nil
}
if status == types.ProverProofValid {
var storeProofErr error
switch message.ProofType(proofParameter.TaskType) {
switch proofMsg.Type {
case message.ProofTypeChunk:
storeProofErr = m.chunkOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx)
storeProofErr = m.chunkOrm.UpdateProofAndProvingStatusByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, types.ProvingTaskVerified, proofTimeSec, tx)
case message.ProofTypeBatch:
storeProofErr = m.batchOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx)
case message.ProofTypeBundle:
storeProofErr = m.bundleOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx)
storeProofErr = m.batchOrm.UpdateProofAndProvingStatusByHash(ctx, proofMsg.ID, proofMsg.BatchProof, types.ProvingTaskVerified, proofTimeSec, tx)
}
if storeProofErr != nil {
log.Error("failed to store chunk/batch proof and proving status", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey, "error", storeProofErr)
@@ -380,7 +360,7 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *
return err
}
if status == types.ProverProofValid && message.ProofType(proofParameter.TaskType) == message.ProofTypeChunk {
if status == types.ProverProofValid && proofMsg.Type == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil {
log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
return checkReadyErr
@@ -405,63 +385,24 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string
if err != nil {
return false
}
case message.ProofTypeBundle:
provingStatus, err = m.bundleOrm.GetProvingStatusByHash(ctx, hash)
if err != nil {
return false
}
}
return provingStatus == types.ProvingTaskVerified
}
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, proverTask *orm.ProverTask, proofParameter coordinatorType.SubmitProofParameter) error {
return m.proverTaskOrm.UpdateProverTaskProof(ctx, proverTask.UUID, []byte(proofParameter.Proof))
}
func (m *ProofReceiverLogic) hardForkName(ctx *gin.Context, hash string, proofType int) (string, error) {
var (
bundle *orm.Bundle
batch *orm.Batch
chunk *orm.Chunk
err error
)
switch message.ProofType(proofType) {
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg) error {
// store the proof to prover task
var proofBytes []byte
var marshalErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
chunk, err = m.chunkOrm.GetChunkByHash(ctx, hash)
proofBytes, marshalErr = json.Marshal(proofMsg.ChunkProof)
case message.ProofTypeBatch:
batch, err = m.batchOrm.GetBatchByHash(ctx, hash)
case message.ProofTypeBundle:
bundle, err = m.bundleOrm.GetBundleByHash(ctx, hash)
proofBytes, marshalErr = json.Marshal(proofMsg.BatchProof)
}
if err != nil {
return "", err
if len(proofBytes) == 0 || marshalErr != nil {
return fmt.Errorf("updateProverTaskProof marshal proof error:%w", marshalErr)
}
if bundle != nil {
batch, err = m.batchOrm.GetBatchByHash(ctx, bundle.StartBatchHash)
if err != nil {
return "", err
}
}
if batch != nil {
chunk, err = m.chunkOrm.GetChunkByHash(ctx, batch.StartChunkHash)
if err != nil {
return "", err
}
}
if chunk == nil {
return "", errors.New("failed to find chunk")
}
l2Block, getBlockErr := m.blockOrm.GetL2BlockByNumber(ctx.Copy(), chunk.StartBlockNumber)
if getBlockErr != nil {
return "", getBlockErr
}
hardForkName := encoding.GetHardforkName(m.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
return hardForkName, nil
return m.proverTaskOrm.UpdateProverTaskProof(ctx, proverTask.UUID, proofBytes)
}

View File

@@ -10,30 +10,38 @@ import (
// NewVerifier sets up a mock verifier.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
batchVKMap := map[string]struct{}{"mock_vk": {}}
chunkVKMap := map[string]struct{}{"mock_vk": {}}
batchVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
chunkVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}
// VerifyChunkProof returns a mock verification result for a ChunkProof.
func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBatchProof returns a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
return true, nil
}
// VerifyBundleProof returns a mock verification result for a BundleProof.
func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
if string(proof.Proof()) == InvalidTestProof {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil

View File

@@ -9,9 +9,7 @@ const InvalidTestProof = "this is a invalid proof"
// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
cfg *config.VerifierConfig
ChunkVKMap map[string]struct{}
BatchVKMap map[string]struct{}
BundleVkMap map[string]struct{}
OpenVMVkMap map[string]struct{}
cfg *config.VerifierConfig
ChunkVKMap map[string]string
BatchVKMap map[string]string
}
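// Illustrative sketch (not part of the diff): after this change the VK maps
// are keyed by hard fork name and hold the base64-encoded verifying key,
// rather than acting as a set keyed by the VK string itself.
func expectedChunkVK(v *Verifier, forkName string) (string, bool) {
	vk, ok := v.ChunkVKMap[forkName]
	return vk, ok
}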

View File

@@ -11,111 +11,84 @@ package verifier
import "C" //nolint:typecheck
import (
"embed"
"encoding/base64"
"encoding/json"
"io"
"io/fs"
"os"
"path"
"unsafe"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/common/types/message"
)
// This struct maps to `CircuitConfig` in common/libzkp/impl/src/verifier.rs.
// Defining a brand-new struct here eliminates side effects in case fields
// in `*config.CircuitConfig` are changed.
type rustCircuitConfig struct {
ForkName string `json:"fork_name"`
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
}
func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
return &rustCircuitConfig{
ForkName: cfg.ForkName,
ParamsPath: cfg.ParamsPath,
AssetsPath: cfg.AssetsPath,
}
}
// This struct maps to `VerifierConfig` in common/libzkp/impl/src/verifier.rs.
// Defining a brand-new struct here eliminates side effects in case fields
// in `*config.VerifierConfig` are changed.
type rustVerifierConfig struct {
LowVersionCircuit *rustCircuitConfig `json:"low_version_circuit"`
HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
}
func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
return &rustVerifierConfig{
LowVersionCircuit: newRustCircuitConfig(cfg.LowVersionCircuit),
HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
}
}
type rustVkDump struct {
Chunk string `json:"chunk_vk"`
Batch string `json:"batch_vk"`
Bundle string `json:"bundle_vk"`
}
// NewVerifier sets up a Rust FFI to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode {
chunkVKMap := map[string]struct{}{"mock_vk": {}}
batchVKMap := map[string]struct{}{"mock_vk": {}}
bundleVKMap := map[string]struct{}{"mock_vk": {}}
openVMVkMap := map[string]struct{}{"mock_vk": {}}
return &Verifier{
cfg: cfg,
ChunkVKMap: chunkVKMap,
BatchVKMap: batchVKMap,
BundleVkMap: bundleVKMap,
OpenVMVkMap: openVMVkMap,
}, nil
batchVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
chunkVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}
verifierConfig := newRustVerifierConfig(cfg)
configBytes, err := json.Marshal(verifierConfig)
paramsPathStr := C.CString(cfg.ParamsPath)
assetsPathStr := C.CString(cfg.AssetsPath)
defer func() {
C.free(unsafe.Pointer(paramsPathStr))
C.free(unsafe.Pointer(assetsPathStr))
}()
C.init_batch_verifier(paramsPathStr, assetsPathStr)
C.init_chunk_verifier(paramsPathStr, assetsPathStr)
v := &Verifier{
cfg: cfg,
ChunkVKMap: make(map[string]string),
BatchVKMap: make(map[string]string),
}
batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
if err != nil {
return nil, err
}
configStr := C.CString(string(configBytes))
defer func() {
C.free(unsafe.Pointer(configStr))
}()
C.init(configStr)
v := &Verifier{
cfg: cfg,
ChunkVKMap: make(map[string]struct{}),
BatchVKMap: make(map[string]struct{}),
BundleVkMap: make(map[string]struct{}),
OpenVMVkMap: make(map[string]struct{}),
}
if err := v.loadLowVersionVKs(cfg); err != nil {
chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
if err != nil {
return nil, err
}
v.BatchVKMap[cfg.ForkName] = batchVK
v.ChunkVKMap[cfg.ForkName] = chunkVK
if err := v.loadOpenVMVks(cfg.HighVersionCircuit.ForkName); err != nil {
if err := v.loadEmbedVK(); err != nil {
return nil, err
}
v.loadCurieVersionVKs()
return v, nil
}
// VerifyBatchProof verifies a ZkProof by marshaling it and sending it to the Halo2 verifier.
func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, batch verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
@@ -139,37 +112,10 @@ func (v *Verifier) VerifyBatchProof(proof message.BatchProof, forkName string) (
}
// VerifyChunkProof verifies a ZkProof by marshaling it and sending it to the Halo2 verifier.
func (v *Verifier) VerifyChunkProof(proof message.ChunkProof, forkName string) (bool, error) {
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
return false, nil
}
return true, nil
}
buf, err := json.Marshal(proof)
if err != nil {
return false, err
}
log.Info("Start to verify chunk proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
verified := C.verify_chunk_proof(proofStr, forkNameStr)
return verified != 0, nil
}
// VerifyBundleProof verifies a ZkProof for a bundle of batches by marshaling it and verifying it via the EVM verifier.
func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled")
if string(proof.Proof()) == InvalidTestProof {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
@@ -181,14 +127,12 @@ func (v *Verifier) VerifyBundleProof(proof message.BundleProof, forkName string)
}
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
log.Info("Start to verify bundle proof ...")
verified := C.verify_bundle_proof(proofStr, forkNameStr)
log.Info("Start to verify chunk proof ...")
verified := C.verify_chunk_proof(proofStr)
return verified != 0, nil
}
@@ -204,61 +148,23 @@ func (v *Verifier) readVK(filePat string) (string, error) {
return base64.StdEncoding.EncodeToString(byt), nil
}
// loadLowVersionVKs loads the low-version VKs; currently these are for darwin.
func (v *Verifier) loadLowVersionVKs(cfg *config.VerifierConfig) error {
bundleVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_bundle.vkey"))
//go:embed legacy_vk/*
var legacyVKFS embed.FS
func (v *Verifier) loadEmbedVK() error {
batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
if err != nil {
log.Error("load embed batch vk failure", "err", err)
return err
}
batchVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_batch.vkey"))
chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
if err != nil {
log.Error("load embed chunk vk failure", "err", err)
return err
}
chunkVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_chunk.vkey"))
if err != nil {
return err
}
v.BundleVkMap[bundleVK] = struct{}{}
v.BatchVKMap[batchVK] = struct{}{}
v.ChunkVKMap[chunkVK] = struct{}{}
return nil
}
func (v *Verifier) loadCurieVersionVKs() {
v.BatchVKMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD9jfGkei+f0wNYpkjW7JO12EfU7CjYVBo+PGku3zaQJI64lbn6BwyTBa4RfrPFpV5mP47ix0sXZ+Wt5wklMLRW7OIJb1yfCDm+gkSsp3/Zqrxt4SY4rQ4WtHfynTCQ0KDi78jNuiFvwxO3ub3DkgGVaxMkGxTRP/Vz6E7MCZMUBR5wZFcMzJn+73f0wYjDxfj00krg9O1VrwVxbVV1ycLR6oQLcOgm/l+xwth8io0vDpF9OY21gD5DgJn9GgcYe8KoRVEbEqApLZPdBibpcSMTY9czZI2LnFcqrDDmYvhEwgjhZrsTog2xLXOODoOupZ/is5ekQ9Gi0y871b1mLlCGA="] = struct{}{}
v.ChunkVKMap["AAAAGQAAAATyWEABRbJ6hQQ5/zLX1gTasr7349minA9rSgMS6gDeHwZKqikRiO3md+pXjjxMHnKQtmXYgMXhJSvlmZ+Ws+cheuly2X1RuNQzcZuRImaKPR9LJsVZYsXfJbuqdKX8p0Gj8G83wMJOmTzNVUyUol0w0lTU+CEiTpHOnxBsTF3EWaW3s1u4ycOgWt1c9M6s7WmaBZLYgAWYCunO5CLCLApNGbCASeck/LuSoedEri5u6HccCKU2khG6zl6W07jvYSbDVLJktbjRiHv+/HQix+K14j8boo8Z/unhpwXCsPxkQA=="] = struct{}{}
}
func (v *Verifier) loadOpenVMVks(forkName string) error {
tempFile := path.Join(os.TempDir(), "openVmVk.json")
defer func() {
if err := os.Remove(tempFile); err != nil {
log.Error("failed to remove temp file", "err", err)
}
}()
forkNameCStr := C.CString(forkName)
defer C.free(unsafe.Pointer(forkNameCStr))
tempFileCStr := C.CString(tempFile)
defer C.free(unsafe.Pointer(tempFileCStr))
C.dump_vk(forkNameCStr, tempFileCStr)
f, err := os.Open(tempFile)
if err != nil {
return err
}
byt, err := io.ReadAll(f)
if err != nil {
return err
}
var dump rustVkDump
if err := json.Unmarshal(byt, &dump); err != nil {
return err
}
v.OpenVMVkMap[dump.Chunk] = struct{}{}
v.OpenVMVkMap[dump.Batch] = struct{}{}
v.OpenVMVkMap[dump.Bundle] = struct{}{}
v.BatchVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
return nil
}
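// Illustrative standalone sketch (not part of the diff): //go:embed compiles
// the legacy_vk directory into the binary at build time, so the legacy VKs
// load without any on-disk assets deployed next to the coordinator.
package main

import (
	"embed"
	"encoding/base64"
	"fmt"
	"io/fs"
)

//go:embed legacy_vk/*
var legacyVKFS embed.FS

func main() {
	byt, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
	if err != nil {
		panic(err) // the embedded file is compiled in, so this only fails on a bad path
	}
	fmt.Println(base64.StdEncoding.EncodeToString(byt))
}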

View File

@@ -11,15 +11,14 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
)
var (
paramsPath = flag.String("params", "/assets/test_params", "params dir")
assetsPathLo = flag.String("assets_lo", "/assets/test_assets_lo", "assets dir")
assetsPathHi = flag.String("assets", "/assets/test_assets", "assets dir")
assetsPath = flag.String("assets", "/assets/test_assets", "assets dir")
batchProofPath = flag.String("batch_proof", "/assets/proof_data/batch_proof", "batch proof file path")
chunkProofPath1 = flag.String("chunk_proof1", "/assets/proof_data/chunk_proof1", "chunk proof file path 1")
chunkProofPath2 = flag.String("chunk_proof2", "/assets/proof_data/chunk_proof2", "chunk proof file path 2")
@@ -29,62 +28,52 @@ func TestFFI(t *testing.T) {
as := assert.New(t)
cfg := &config.VerifierConfig{
MockMode: false,
LowVersionCircuit: &config.CircuitConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPathLo,
ForkName: "darwin",
MinProverVersion: "",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPathHi,
ForkName: "darwinV2",
MinProverVersion: "",
},
MockMode: false,
ParamsPath: *paramsPath,
AssetsPath: *assetsPath,
}
v, err := NewVerifier(cfg)
as.NoError(err)
chunkProof1 := readChunkProof(*chunkProofPath1, as)
chunkOk1, err := v.VerifyChunkProof(chunkProof1, "darwinV2")
chunkOk1, err := v.VerifyChunkProof(chunkProof1)
as.NoError(err)
as.True(chunkOk1)
t.Log("Verified chunk proof 1")
chunkProof2 := readChunkProof(*chunkProofPath2, as)
chunkOk2, err := v.VerifyChunkProof(chunkProof2, "darwinV2")
chunkOk2, err := v.VerifyChunkProof(chunkProof2)
as.NoError(err)
as.True(chunkOk2)
t.Log("Verified chunk proof 2")
batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof, "darwinV2")
batchOk, err := v.VerifyBatchProof(batchProof, "curie")
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")
}
func readBatchProof(filePat string, as *assert.Assertions) types.BatchProof {
func readBatchProof(filePat string, as *assert.Assertions) *message.BatchProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.Halo2BatchProof{}
proof := &message.BatchProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof
}
func readChunkProof(filePat string, as *assert.Assertions) types.ChunkProof {
func readChunkProof(filePat string, as *assert.Assertions) *message.ChunkProof {
f, err := os.Open(filePat)
as.NoError(err)
byt, err := io.ReadAll(f)
as.NoError(err)
proof := &types.Halo2ChunkProof{}
proof := &message.ChunkProof{}
as.NoError(json.Unmarshal(byt, proof))
return proof

View File

@@ -2,15 +2,18 @@ package orm
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/da-codec/encoding/codecv0"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)
@@ -30,9 +33,6 @@ type Batch struct {
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
// proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
@@ -59,9 +59,6 @@ type Batch struct {
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// bundle
BundleHash string `json:"bundle_hash" gorm:"column:bundle_hash"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -79,12 +76,12 @@ func (*Batch) TableName() string {
}
// GetUnassignedBatch retrieves unassigned batch based on the specified limit.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
// The returned batch is the lowest-index one matching the constraints.
func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
var batch Batch
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND codec_version != 5 AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady))
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
err := db.Raw(sql).Scan(&batch).Error
if err != nil {
return nil, fmt.Errorf("Batch.GetUnassignedBatch error: %w", err)
@@ -95,30 +92,13 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTo
return &batch, nil
}
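// Design note (not part of the diff): the fmt.Sprintf-built query above only
// interpolates trusted integer values, so injection is not a concern here; a
// parameterized equivalent with gorm placeholders would be:
// db.Raw("SELECT * FROM batch WHERE proving_status = ? AND total_attempts < ? AND active_attempts < ? AND chunk_proofs_status = ? AND start_chunk_index >= ? AND end_chunk_index < ? AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1",
//	int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex).Scan(&batch)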
// GetUnassignedBatchCount retrieves unassigned batch count.
func (o *Batch) GetUnassignedBatchCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (int64, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
db = db.Where("codec_version != 5")
db = db.Where("batch.deleted_at IS NULL")
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Batch.GetUnassignedBatchCount error: %w", err)
}
return count, nil
}
// GetAssignedBatch retrieves assigned batch based on the specified limit.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetAssignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
// The returned batch is the lowest-index one matching the constraints.
func (o *Batch) GetAssignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
var batch Batch
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady))
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
err := db.Raw(sql).Scan(&batch).Error
if err != nil {
return nil, fmt.Errorf("Batch.GetAssignedBatch error: %w", err)
@@ -204,59 +184,6 @@ func (o *Batch) GetAttemptsByHash(ctx context.Context, hash string) (int16, int1
return batch.ActiveAttempts, batch.TotalAttempts, nil
}
// CheckIfBundleBatchProofsAreReady checks if all proofs for all batches of a given bundleHash are collected.
func (o *Batch) CheckIfBundleBatchProofsAreReady(ctx context.Context, bundleHash string) (bool, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("bundle_hash = ? AND proving_status != ?", bundleHash, types.ProvingTaskVerified)
var count int64
if err := db.Count(&count).Error; err != nil {
return false, fmt.Errorf("Chunk.CheckIfBundleBatchProofsAreReady error: %w, bundle hash: %v", err, bundleHash)
}
return count == 0, nil
}
// GetBatchByHash retrieves the given batch.
func (o *Batch) GetBatchByHash(ctx context.Context, hash string) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash = ?", hash)
var batch Batch
if err := db.First(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchByHash error: %w, batch hash: %v", err, hash)
}
return &batch, nil
}
// GetBatchesByBundleHash retrieves the batches belonging to the given bundle hash.
func (o *Batch) GetBatchesByBundleHash(ctx context.Context, bundleHash string) ([]*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("bundle_hash = ?", bundleHash)
db = db.Order("index ASC")
var batches []*Batch
if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchesByBundleHash error: %w, bundle hash: %v", err, bundleHash)
}
return batches, nil
}
// GetBatchByIndex retrieves the batch by the given index.
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("index = ?", index)
var batch Batch
if err := db.First(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
}
return &batch, nil
}
// InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) {
if batch == nil {
@@ -268,16 +195,11 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
return nil, errors.New("invalid args: batch contains 0 chunk")
}
codec, err := encoding.CodecFromVersion(encoding.CodecV0)
if err != nil {
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
daBatch, err := codec.NewDABatch(batch)
daBatch, err := codecv0.NewDABatch(batch)
if err != nil {
log.Error("failed to create new DA batch",
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, err
}
@@ -285,7 +207,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
parentBatch, err := o.GetLatestBatch(ctx)
if err != nil {
log.Error("failed to get latest batch", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
@@ -296,17 +218,17 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
startChunkIndex = parentBatch.EndChunkIndex + 1
}
startDAChunk, err := codec.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to create start DA chunk", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
startDAChunkHash, err := startDAChunk.Hash()
if err != nil {
log.Error("failed to get start DA chunk hash", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
@@ -314,24 +236,24 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
for i := uint64(0); i < numChunks-1; i++ {
totalL1MessagePoppedBeforeEndDAChunk += batch.Chunks[i].NumL1Messages(totalL1MessagePoppedBeforeEndDAChunk)
}
endDAChunk, err := codec.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
endDAChunk, err := codecv0.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
if err != nil {
log.Error("failed to create end DA chunk", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
endDAChunkHash, err := endDAChunk.Hash()
if err != nil {
log.Error("failed to get end DA chunk hash", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
"parent hash", batch.ParentBatchHash.Hex(), "number of chunks", numChunks, "err", err)
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
}
newBatch := Batch{
Index: batch.Index,
Hash: daBatch.Hash().Hex(),
DataHash: daBatch.DataHash().Hex(),
DataHash: daBatch.DataHash.Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: endDAChunkHash.Hex(),
@@ -395,14 +317,18 @@ func (o *Batch) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA
}
// UpdateProofAndProvingStatusByHash updates the batch proof and proving status by hash.
func (o *Batch) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
func (o *Batch) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.BatchProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
}
updateFields := make(map[string]interface{})
updateFields["proof"] = proof
updateFields["proof"] = proofBytes
updateFields["proving_status"] = provingStatus
updateFields["proof_time_sec"] = proofTimeSec
updateFields["proved_at"] = utils.NowUTC()

View File

@@ -1,245 +0,0 @@
package orm
import (
"context"
"errors"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/utils"
)
// Bundle represents a bundle of batches.
type Bundle struct {
db *gorm.DB `gorm:"column:-"`
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
StartBatchIndex uint64 `json:"start_batch_index" gorm:"column:start_batch_index"`
StartBatchHash string `json:"start_batch_hash" gorm:"column:start_batch_hash"`
EndBatchIndex uint64 `json:"end_batch_index" gorm:"column:end_batch_index"`
EndBatchHash string `json:"end_batch_hash" gorm:"column:end_batch_hash"`
// proof
BatchProofsStatus int16 `json:"batch_proofs_status" gorm:"column:batch_proofs_status;default:1"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
TotalAttempts int16 `json:"total_attempts" gorm:"column:total_attempts;default:0"`
ActiveAttempts int16 `json:"active_attempts" gorm:"column:active_attempts;default:0"`
// rollup
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewBundle creates a new Bundle database instance.
func NewBundle(db *gorm.DB) *Bundle {
return &Bundle{db: db}
}
// TableName returns the table name for the Bundle model.
func (*Bundle) TableName() string {
return "bundle"
}
// GetUnassignedBundle retrieves unassigned bundle based on the specified limit.
// The returned bundle is the lowest-index one matching the constraints.
func (o *Bundle) GetUnassignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {
var bundle Bundle
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND codec_version != 5 AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.BatchProofsStatusReady))
err := db.Raw(sql).Scan(&bundle).Error
if err != nil {
return nil, fmt.Errorf("Batch.GetUnassignedBundle error: %w", err)
}
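// Raw().Scan leaves the struct zero-valued when no row matches (it does not
// return gorm.ErrRecordNotFound), so the empty-hash check below means "no
// unassigned bundle available" rather than an error.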
if bundle.StartBatchHash == "" || bundle.EndBatchHash == "" {
return nil, nil
}
return &bundle, nil
}
// GetUnassignedBundleCount retrieves unassigned bundle count.
func (o *Bundle) GetUnassignedBundleCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (int64, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("batch_proofs_status = ?", int(types.BatchProofsStatusReady))
db = db.Where("codec_version != 5")
db = db.Where("bundle.deleted_at IS NULL")
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Bundle.GetUnassignedBundleCount error: %w", err)
}
return count, nil
}
// GetAssignedBundle retrieves an assigned bundle that is still within the attempt limits.
// The query returns the bundle with the lowest index.
func (o *Bundle) GetAssignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {
var bundle Bundle
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.BatchProofsStatusReady))
err := db.Raw(sql).Scan(&bundle).Error
if err != nil {
return nil, fmt.Errorf("Bundle.GetAssignedBatch error: %w", err)
}
if bundle.StartBatchHash == "" || bundle.EndBatchHash == "" {
return nil, nil
}
return &bundle, nil
}
// GetProvingStatusByHash retrieves the proving status of a bundle given its hash.
func (o *Bundle) GetProvingStatusByHash(ctx context.Context, hash string) (types.ProvingStatus, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Select("proving_status")
db = db.Where("hash = ?", hash)
var bundle Bundle
if err := db.Find(&bundle).Error; err != nil {
return types.ProvingStatusUndefined, fmt.Errorf("Bundle.GetProvingStatusByHash error: %w, bundle hash: %v", err, hash)
}
return types.ProvingStatus(bundle.ProvingStatus), nil
}
// GetBundleByHash retrieves the bundle matching the given bundle hash.
func (o *Bundle) GetBundleByHash(ctx context.Context, bundleHash string) (*Bundle, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash = ?", bundleHash)
var bundle Bundle
if err := db.First(&bundle).Error; err != nil {
return nil, fmt.Errorf("Bundle.GetBundleByHash error: %w, bundle hash: %v", err, bundleHash)
}
return &bundle, nil
}
// GetUnassignedAndBatchesUnreadyBundles retrieves the bundles that are unassigned and whose batch proofs are not yet ready.
func (o *Bundle) GetUnassignedAndBatchesUnreadyBundles(ctx context.Context, offset, limit int) ([]*Bundle, error) {
if offset < 0 || limit < 0 {
return nil, errors.New("limit and offset must not be smaller than 0")
}
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
db = db.Where("batch_proofs_status = ?", types.BatchProofsStatusPending)
db = db.Order("index ASC")
db = db.Offset(offset)
db = db.Limit(limit)
var bundles []*Bundle
if err := db.Find(&bundles).Error; err != nil {
return nil, fmt.Errorf("Bundle.GetUnassignedAndBatchesUnreadyBundles error: %w", err)
}
return bundles, nil
}
// UpdateBatchProofsStatusByBundleHash updates the status of batch_proofs_status field for a given bundle hash.
func (o *Bundle) UpdateBatchProofsStatusByBundleHash(ctx context.Context, bundleHash string, status types.BatchProofsStatus) error {
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash = ?", bundleHash)
if err := db.Update("batch_proofs_status", status).Error; err != nil {
return fmt.Errorf("Bundle.UpdateBatchProofsStatusByBundleHash error: %w, bundle hash: %v, status: %v", err, bundleHash, status.String())
}
return nil
}
// UpdateProvingStatusFailed updates the proving status failed of a bundle.
func (o *Bundle) UpdateProvingStatusFailed(ctx context.Context, bundleHash string, maxAttempts uint8, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash", bundleHash)
db = db.Where("total_attempts >= ?", maxAttempts)
db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
if err := db.Update("proving_status", int(types.ProvingTaskFailed)).Error; err != nil {
return fmt.Errorf("Bundle.UpdateProvingStatus error: %w, bundle hash: %v, status: %v", err, bundleHash, types.ProvingTaskFailed.String())
}
return nil
}
// UpdateProofAndProvingStatusByHash updates the bundle proof and proving status by hash.
func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{})
updateFields["proof"] = proof
updateFields["proving_status"] = provingStatus
updateFields["proof_time_sec"] = proofTimeSec
updateFields["proved_at"] = utils.NowUTC()
db = db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
}
return nil
}
// UpdateBundleAttempts atomically increments the attempt counters of the given bundle, guarded by the caller's current counter values (optimistic locking).
func (o *Bundle) UpdateBundleAttempts(ctx context.Context, hash string, curActiveAttempts, curTotalAttempts int16) (int64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash = ?", hash)
db = db.Where("active_attempts = ?", curActiveAttempts)
db = db.Where("total_attempts = ?", curTotalAttempts)
result := db.Updates(map[string]interface{}{
"proving_status": types.ProvingTaskAssigned,
"total_attempts": gorm.Expr("total_attempts + 1"),
"active_attempts": gorm.Expr("active_attempts + 1"),
})
if result.Error != nil {
return 0, fmt.Errorf("failed to update bundle, err:%w", result.Error)
}
return result.RowsAffected, nil
}
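Because the UPDATE is guarded by the caller's snapshot of both counters, a zero RowsAffected result means another instance won the race, not a failure. A hedged assignment sketch (the surrounding coordinator wiring is illustrative):
func tryAssignBundle(ctx context.Context, bundleOrm *Bundle, maxActive, maxTotal uint8) (*Bundle, error) {
	bundle, err := bundleOrm.GetUnassignedBundle(ctx, maxActive, maxTotal)
	if err != nil || bundle == nil {
		return nil, err
	}
	rows, err := bundleOrm.UpdateBundleAttempts(ctx, bundle.Hash, bundle.ActiveAttempts, bundle.TotalAttempts)
	if err != nil {
		return nil, err
	}
	if rows == 0 {
		// Another coordinator bumped the counters first; let the caller retry with a fresh query.
		return nil, nil
	}
	return bundle, nil
}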
// DecreaseActiveAttemptsByHash decrements the active_attempts of a bundle given its hash.
func (o *Bundle) DecreaseActiveAttemptsByHash(ctx context.Context, bundleHash string, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash = ?", bundleHash)
db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
db = db.Where("active_attempts > ?", 0)
result := db.UpdateColumn("active_attempts", gorm.Expr("active_attempts - 1"))
if result.Error != nil {
return fmt.Errorf("Bundle.DecreaseActiveAttemptsByHash error: %w, bundle hash: %v", result.Error, bundleHash)
}
if result.RowsAffected == 0 {
log.Warn("No rows were affected in DecreaseActiveAttemptsByHash", "bundle hash", bundleHash)
}
return nil
}

View File

@@ -2,7 +2,6 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -54,7 +53,7 @@ func (r *Challenge) InsertChallenge(ctx context.Context, challengeString string)
return fmt.Errorf("the challenge string:%s have been used", challengeString)
}
return errors.New("insert challenge string affected rows more than 1")
return fmt.Errorf("insert challenge string affected rows more than 1")
}
// DeleteExpireChallenge delete the expire challenge

View File

@@ -2,15 +2,18 @@ package orm
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/da-codec/encoding/codecv0"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)
@@ -28,8 +31,6 @@ type Chunk struct {
StartBlockTime uint64 `json:"start_block_time" gorm:"column:start_block_time"`
TotalL1MessagesPoppedBefore uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
TotalL1MessagesPoppedInChunk uint64 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
PrevL1MessageQueueHash string `json:"prev_l1_message_queue_hash" gorm:"column:prev_l1_message_queue_hash"`
PostL1MessageQueueHash string `json:"post_l1_message_queue_hash" gorm:"column:post_l1_message_queue_hash"`
ParentChunkHash string `json:"parent_chunk_hash" gorm:"column:parent_chunk_hash"`
StateRoot string `json:"state_root" gorm:"column:state_root"`
ParentChunkStateRoot string `json:"parent_chunk_state_root" gorm:"column:parent_chunk_state_root"`
@@ -73,11 +74,11 @@ func (*Chunk) TableName() string {
// GetUnassignedChunk retrieves an unassigned chunk whose blocks fall within the specified range.
// The query returns the chunk with the lowest index.
func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (*Chunk, error) {
func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
var chunk Chunk
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND end_block_number <= %d AND codec_version != 5 AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, height)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
err := db.Raw(sql).Scan(&chunk).Error
if err != nil {
return nil, fmt.Errorf("Chunk.GetUnassignedChunk error: %w", err)
@@ -88,30 +89,13 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTo
return &chunk, nil
}
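The reworked signature selects by block range instead of a single height ceiling, so a prover that only holds blocks in [fromBlockNum, toBlockNum) can be matched precisely. A hedged call sketch (variable names are illustrative; if the same empty-hash convention as the bundle ORM applies, a nil chunk means nothing matched):
chunk, err := chunkOrm.GetUnassignedChunk(ctx, fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
if err != nil {
	return err
}
if chunk == nil {
	return nil // no provable chunk falls entirely inside the range
}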
// GetUnassignedChunkCount retrieves unassigned chunk count.
func (o *Chunk) GetUnassignedChunkCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (int64, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("end_block_number <= ?", height)
db = db.Where("codec_version != 5")
db = db.Where("chunk.deleted_at IS NULL")
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Chunk.GetUnassignedChunkCount error: %w", err)
}
return count, nil
}
// GetAssignedChunk retrieves an assigned chunk whose blocks fall within the specified range.
// The query returns the chunk with the lowest index.
func (o *Chunk) GetAssignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (*Chunk, error) {
func (o *Chunk) GetAssignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
var chunk Chunk
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND end_block_number <= %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, height)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
err := db.Raw(sql).Scan(&chunk).Error
if err != nil {
return nil, fmt.Errorf("Chunk.GetAssignedChunk error: %w", err)
@@ -137,6 +121,32 @@ func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*
return chunks, nil
}
// GetProofsByBatchHash retrieves the proofs associated with a specific batch hash.
// It returns a slice of decoded proofs (message.ChunkProof) obtained from the database.
// The returned proofs are sorted in ascending order by their associated chunk index.
func (o *Chunk) GetProofsByBatchHash(ctx context.Context, batchHash string) ([]*message.ChunkProof, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash", batchHash)
db = db.Order("index ASC")
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash error: %w, batch hash: %v", err, batchHash)
}
var proofs []*message.ChunkProof
for _, chunk := range chunks {
var proof message.ChunkProof
if err := json.Unmarshal(chunk.Proof, &proof); err != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", err, batchHash, chunk.Hash)
}
proofs = append(proofs, &proof)
}
return proofs, nil
}
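Callers get back per-chunk proofs already ordered by chunk index, which is what a batch aggregation task consumes. A small hedged wrapper:
func collectChunkProofs(ctx context.Context, chunkOrm *Chunk, batchHash string) ([]*message.ChunkProof, error) {
	proofs, err := chunkOrm.GetProofsByBatchHash(ctx, batchHash)
	if err != nil {
		return nil, err
	}
	if len(proofs) == 0 {
		return nil, fmt.Errorf("no chunk proofs stored yet for batch %s", batchHash)
	}
	return proofs, nil // proofs[i] is the proof of the i-th chunk in the batch
}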
// getLatestChunk retrieves the latest chunk from the database.
func (o *Chunk) getLatestChunk(ctx context.Context) (*Chunk, error) {
db := o.db.WithContext(ctx)
@@ -248,12 +258,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
parentChunkStateRoot = parentChunk.StateRoot
}
codec, err := encoding.CodecFromVersion(encoding.CodecV0)
if err != nil {
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
daChunk, err := codec.NewDAChunk(chunk, totalL1MessagePoppedBefore)
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
if err != nil {
log.Error("failed to initialize new DA chunk", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
@@ -265,13 +270,13 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
totalL1CommitCalldataSize, err := codec.EstimateChunkL1CommitCalldataSize(chunk)
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit calldata size", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
}
totalL1CommitGas, err := codec.EstimateChunkL1CommitGas(chunk)
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(chunk)
if err != nil {
log.Error("failed to estimate chunk L1 commit gas", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
@@ -285,7 +290,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: chunk.TotalGasUsed(),
TotalL2TxGas: chunk.L2GasUsed(),
TotalL2TxNum: chunk.NumL2Transactions(),
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
TotalL1CommitGas: totalL1CommitGas,
@@ -335,14 +340,18 @@ func (o *Chunk) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA
}
// UpdateProofAndProvingStatusByHash updates the chunk proof and proving_status by hash.
func (o *Chunk) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, status types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
func (o *Chunk) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.ChunkProof, status types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
}
updateFields := make(map[string]interface{})
updateFields["proof"] = proof
updateFields["proof"] = proofBytes
updateFields["proving_status"] = int(status)
updateFields["proof_time_sec"] = proofTimeSec
updateFields["proved_at"] = utils.NowUTC()

View File

@@ -74,68 +74,6 @@ func (o *L2Block) GetL2BlockHashesByChunkHash(ctx context.Context, chunkHash str
return blockHashes, nil
}
// GetL2BlockByNumber retrieves the L2 block by its block number.
func (o *L2Block) GetL2BlockByNumber(ctx context.Context, blockNumber uint64) (*L2Block, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Where("number = ?", blockNumber)
var l2Block L2Block
if err := db.First(&l2Block).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlockByNumber error: %w, chunk block number: %v", err, blockNumber)
}
return &l2Block, nil
}
// GetL2BlocksInRange retrieves the L2 blocks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end block numbers.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*encoding.Block, error) {
if startBlockNumber > endBlockNumber {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: start block number should be less than or equal to end block number, start block: %v, end block: %v", startBlockNumber, endBlockNumber)
}
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_root, row_consumption")
db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
db = db.Order("number ASC")
var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
// sanity check
if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: unexpected number of results, expected: %v, got: %v", endBlockNumber-startBlockNumber+1, len(l2Blocks))
}
var blocks []*encoding.Block
for _, v := range l2Blocks {
var block encoding.Block
if err := json.Unmarshal([]byte(v.Transactions), &block.Transactions); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
block.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), block.Header); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
block.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
if err := json.Unmarshal([]byte(v.RowConsumption), &block.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
blocks = append(blocks, &block)
}
return blocks, nil
}
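The range is inclusive on both ends and the sanity check rejects partial results, so a caller can assume a gapless slice. A hedged sketch of assembling an encoding.Chunk from a stored chunk's block range (field names follow the Chunk ORM model above):
blocks, err := l2BlockOrm.GetL2BlocksInRange(ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber)
if err != nil {
	return err // covers both query errors and missing blocks in the range
}
encodingChunk := &encoding.Chunk{Blocks: blocks}
_ = encodingChunk // hand off to proof-task construction, prover input, etc.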
// InsertL2Blocks inserts l2 blocks into the "l2_block" table.
// for unit test
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block) error {

View File

@@ -9,12 +9,11 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/database/migrate"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
)
var (

View File

@@ -2,7 +2,6 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -66,7 +65,7 @@ func (p *ProverBlockList) IsPublicKeyBlocked(ctx context.Context, publicKey stri
db = db.Model(&ProverBlockList{})
db = db.Where("public_key = ?", publicKey)
if err := db.First(&ProverBlockList{}).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return false, nil // Public key not found, hence it's not blocked.
}
return true, fmt.Errorf("ProverBlockList.IsPublicKeyBlocked error: %w, public key: %v", err, publicKey)

View File

@@ -2,7 +2,6 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -61,7 +60,7 @@ func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (bo
var task ProverTask
err := db.Where("prover_public_key = ? AND proving_status = ?", publicKey, types.ProverAssigned).First(&task).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
if err == gorm.ErrRecordNotFound {
return false, nil
}
return false, err
@@ -117,25 +116,23 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType messag
return proverTasks, nil
}
// GetFailedProverTasksByHash retrieves the failed ProverTask records associated with the specified hash.
// The returned prover task objects are sorted in descending order by their ids.
func (o *ProverTask) GetFailedProverTasksByHash(ctx context.Context, taskType message.ProofType, hash string, limit int) ([]*ProverTask, error) {
// GetAssignedProverTaskByTaskIDAndProver gets the assigned prover task by task ID, prover public key, and prover version.
// TODO: deprecate this function once all provers have upgraded.
func (o *ProverTask) GetAssignedProverTaskByTaskIDAndProver(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", hash)
db = db.Where("proving_status = ?", int(types.ProverProofInvalid))
db = db.Order("id desc")
db = db.Where("task_id", taskID)
db = db.Where("prover_public_key", proverPublicKey)
db = db.Where("prover_version", proverVersion)
db = db.Where("proving_status", types.ProverAssigned)
if limit != 0 {
db = db.Limit(limit)
var proverTask ProverTask
err := db.First(&proverTask).Error
if err != nil {
return nil, fmt.Errorf("ProverTask.GetProverTaskByTaskIDAndProver err:%w, taskID:%s, pubkey:%s, prover_version:%s", err, taskID, proverPublicKey, proverVersion)
}
var proverTasks []*ProverTask
if err := db.Find(&proverTasks).Error; err != nil {
return nil, fmt.Errorf("ProverTask.GetFailedProverTasksByHash error: %w, hash: %v", err, hash)
}
return proverTasks, nil
return &proverTask, nil
}
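A hedged sketch of the lookup a proof-submission handler might perform before accepting a result (the proof type and variable names are illustrative):
task, err := proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx, message.ProofTypeChunk, taskID, proverPublicKey, proverVersion)
if err != nil {
	return fmt.Errorf("no matching assigned task: %w", err)
}
_ = task // safe to validate and persist the submitted proof for this task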
// GetProverTaskByUUIDAndPublicKey gets the prover task by UUID and public key.

View File

@@ -1,15 +1,6 @@
package types
import (
"crypto/ecdsa"
"encoding/hex"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
import "time"
const (
// PublicKey the public key for context
@@ -18,124 +9,26 @@ const (
ProverName = "prover_name"
// ProverVersion the prover version for context
ProverVersion = "prover_version"
// ProverProviderTypeKey the prover provider type for context
ProverProviderTypeKey = "prover_provider_type"
// HardForkName the hard fork name for context
// HardForkName the fork name for context
HardForkName = "hard_fork_name"
)
// Message the login message struct
type Message struct {
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
}
// LoginParameter for /login api
type LoginParameter struct {
Message Message `form:"message" json:"message" binding:"required"`
Signature string `form:"signature" json:"signature" binding:"required"`
}
// LoginSchema for /login response
type LoginSchema struct {
Time time.Time `json:"time"`
Token string `json:"token"`
}
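// MessageWithoutProverProviderType is the legacy login message layout, kept so
// that signatures from provers that predate ProverProviderType can still be
// hashed and verified (see Verify below).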
type MessageWithoutProverProviderType struct {
Challenge string `json:"challenge"`
ProverVersion string `json:"prover_version"`
ProverName string `json:"prover_name"`
ProverTypes []ProverType `json:"prover_types"`
VKs []string `json:"vks"`
}
// Message the login message struct
type Message struct {
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
ProverProviderType ProverProviderType `form:"prover_provider_type" json:"prover_provider_type,omitempty"`
ProverTypes []ProverType `form:"prover_types" json:"prover_types"`
VKs []string `form:"vks" json:"vks"`
}
// LoginParameterWithHardForkName is the login payload extended with the hard fork name.
type LoginParameterWithHardForkName struct {
LoginParameter
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
}
// LoginParameter for /login api
type LoginParameter struct {
Message Message `form:"message" json:"message" binding:"required"`
PublicKey string `form:"public_key" json:"public_key"`
Signature string `form:"signature" json:"signature" binding:"required"`
}
// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *LoginParameter) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := Hash(a.Message)
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the message of auth.
func (a *LoginParameter) Verify() (bool, error) {
var hash []byte
var err error
if a.Message.ProverProviderType == ProverProviderTypeUndefined {
// for backward compatibility, calculate hash without ProverProviderType
hash, err = Hash(a.Message.ToMessageWithoutProverProviderType())
} else {
hash, err = Hash(a.Message)
}
if err != nil {
return false, err
}
expectedPubKey, err := a.Message.DecodeAndUnmarshalPubkey(a.PublicKey)
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
isValid := crypto.VerifySignature(crypto.CompressPubkey(expectedPubKey), hash, sig[:len(sig)-1])
return isValid, nil
}
func (m *Message) ToMessageWithoutProverProviderType() MessageWithoutProverProviderType {
return MessageWithoutProverProviderType{
Challenge: m.Challenge,
ProverVersion: m.ProverVersion,
ProverName: m.ProverName,
ProverTypes: m.ProverTypes,
VKs: m.VKs,
}
}
// DecodeAndUnmarshalPubkey decodes a hex-encoded public key and unmarshal it into an ecdsa.PublicKey
func (i *Message) DecodeAndUnmarshalPubkey(pubKeyHex string) (*ecdsa.PublicKey, error) {
// Decode hex string to bytes
byteKey, err := hex.DecodeString(pubKeyHex)
if err != nil {
return nil, err
}
// Unmarshal bytes to ECDSA public key
pubKey, err := crypto.DecompressPubkey(byteKey)
if err != nil {
return nil, err
}
return pubKey, nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func Hash(i interface{}) ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
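End to end: SignWithKey RLP-encodes the message, hashes it with Keccak256, and signs with secp256k1; Verify recomputes the hash and checks the signature against the compressed public key carried in the parameter. A minimal round-trip sketch; ProverProviderTypeInternal is a placeholder for a defined provider-type constant not shown in this diff (leaving the field undefined would make Verify hash the legacy struct, which SignWithKey does not sign):
priv, err := crypto.GenerateKey()
if err != nil {
	panic(err)
}
param := LoginParameter{
	Message: Message{
		Challenge:          "random-challenge",
		ProverVersion:      "v4.4.0",
		ProverName:         "example-prover",
		ProverProviderType: ProverProviderTypeInternal, // placeholder constant
	},
	PublicKey: common.Bytes2Hex(crypto.CompressPubkey(&priv.PublicKey)),
}
if err := param.SignWithKey(priv); err != nil {
	panic(err)
}
ok, err := param.Verify() // recomputes Keccak256(RLP(Message)) and verifies the signature
fmt.Println(ok, err)      // expected: true <nil>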

Some files were not shown because too many files have changed in this diff.