Compare commits


1 Commit

Author: Mengran Lan
SHA1: 79f86c879e
Message: fix(coordinator): fix issue devnet's coordinator can not set genesis's curieBlock to 0
Date: 2024-07-18 18:19:15 +08:00
162 changed files with 4519 additions and 9345 deletions

View File

@@ -1,15 +0,0 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-US"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: true
drafts: false
chat:
auto_reply: true

View File

@@ -49,8 +49,8 @@ jobs:
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
push: true push: true
tags: | tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -94,8 +94,8 @@ jobs:
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
push: true push: true
tags: | tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -139,8 +139,8 @@ jobs:
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
push: true push: true
tags: | tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -184,8 +184,8 @@ jobs:
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
push: true push: true
tags: | tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -229,8 +229,8 @@ jobs:
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
push: true push: true
tags: | tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -274,8 +274,8 @@ jobs:
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
push: true push: true
tags: | tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -318,8 +318,8 @@ jobs:
file: ./build/dockerfiles/coordinator-api.Dockerfile file: ./build/dockerfiles/coordinator-api.Dockerfile
push: true push: true
tags: | tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
@@ -363,7 +363,7 @@ jobs:
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64
push: true push: true
tags: | tags: |
scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
scrolltech/${{ env.REPOSITORY }}:latest ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }} ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

View File

@@ -1,12 +1,12 @@
.PHONY: fmt dev_docker build_test_docker run_test_docker clean update .PHONY: fmt dev_docker build_test_docker run_test_docker clean update
L2GETH_TAG=scroll-v5.6.3 L2GETH_TAG=scroll-v5.3.0
help: ## Display this help message help: ## Display this help message
@grep -h \ @grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \ -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
update: ## Update dependencies update:
go work sync go work sync
cd $(PWD)/bridge-history-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy cd $(PWD)/bridge-history-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG}&& go mod tidy
@@ -15,14 +15,14 @@ update: ## Update dependencies
cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
lint: ## The code's format and security checks lint: ## The code's format and security checks.
make -C rollup lint make -C rollup lint
make -C common lint make -C common lint
make -C coordinator lint make -C coordinator lint
make -C database lint make -C database lint
make -C bridge-history-api lint make -C bridge-history-api lint
fmt: ## Format the code fmt: ## format the code
go work sync go work sync
cd $(PWD)/bridge-history-api/ && go mod tidy cd $(PWD)/bridge-history-api/ && go mod tidy
cd $(PWD)/common/ && go mod tidy cd $(PWD)/common/ && go mod tidy
@@ -38,10 +38,10 @@ fmt: ## Format the code
goimports -local $(PWD)/rollup/ -w . goimports -local $(PWD)/rollup/ -w .
goimports -local $(PWD)/tests/integration-test/ -w . goimports -local $(PWD)/tests/integration-test/ -w .
dev_docker: ## Build docker images for development/testing usages dev_docker: ## build docker images for development/testing usages
docker pull postgres docker pull postgres
docker build -t scroll_l1geth --platform linux/amd64 ./common/testcontainers/docker/l1geth/ docker build -t scroll_l1geth ./common/testcontainers/docker/l1geth/
docker build -t scroll_l2geth --platform linux/amd64 ./common/testcontainers/docker/l2geth/ docker build -t scroll_l2geth ./common/testcontainers/docker/l2geth/
clean: ## Empty out the bin folder clean: ## Empty out the bin folder
@rm -rf build/bin @rm -rf build/bin

View File

@@ -16,9 +16,10 @@
├── <a href="./common/">common</a>: Common libraries and types ├── <a href="./common/">common</a>: Common libraries and types
├── <a href="./coordinator/">coordinator</a>: Prover coordinator service that dispatches proving tasks to provers ├── <a href="./coordinator/">coordinator</a>: Prover coordinator service that dispatches proving tasks to provers
├── <a href="./database">database</a>: Database client and schema definition ├── <a href="./database">database</a>: Database client and schema definition
├── <a href="./src">l2geth</a>: Scroll execution node
├── <a href="./prover">prover</a>: Prover client that runs proof generation for zkEVM circuit and aggregation circuit ├── <a href="./prover">prover</a>: Prover client that runs proof generation for zkEVM circuit and aggregation circuit
├── <a href="./rollup">rollup</a>: Rollup-related services ├── <a href="./rollup">rollup</a>: Rollup-related services
├── <a href="https://github.com/scroll-tech/scroll-contracts.git">scroll-contracts</a>: solidity code for Scroll L1 bridge and rollup contracts and L2 bridge and pre-deployed contracts. ├── <a href="./rpc-gateway">rpc-gateway</a>: RPC gateway external repo
└── <a href="./tests">tests</a>: Integration tests └── <a href="./tests">tests</a>: Integration tests
</pre> </pre>

File diff suppressed because one or more lines are too long

View File

@@ -19,9 +19,7 @@
"ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556", "ScrollChainAddr": "0xa13BAF47339d63B743e7Da8741db5456DAc1E556",
"GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6", "GatewayRouterAddr": "0xF8B1378579659D8F7EE5f3C929c2f3E332E41Fd6",
"MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B", "MessageQueueAddr": "0x0d7E906BD9cAFa154b048cFa766Cc1E54E39AF9B",
"BatchBridgeGatewayAddr": "0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4", "BatchBridgeGatewayAddr": "0x5Bcfd99c34cf7E06fc756f6f5aE7400504852bc4"
"GasTokenGatewayAddr": "0x0000000000000000000000000000000000000000",
"WrappedTokenGatewayAddr": "0x0000000000000000000000000000000000000000"
}, },
"L2": { "L2": {
"confirmation": 0, "confirmation": 0,

View File

@@ -8,10 +8,10 @@ require (
github.com/go-redis/redis/v8 v8.11.5 github.com/go-redis/redis/v8 v8.11.5
github.com/pressly/goose/v3 v3.16.0 github.com/pressly/goose/v3 v3.16.0
github.com/prometheus/client_golang v1.19.0 github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/stretchr/testify v1.9.0 github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7 github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.7.0 golang.org/x/sync v0.6.0
gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
) )
@@ -19,7 +19,7 @@ require (
dario.cat/mergo v1.0.0 // indirect dario.cat/mergo v1.0.0 // indirect
github.com/VictoriaMetrics/fastcache v1.12.1 // indirect github.com/VictoriaMetrics/fastcache v1.12.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/bits-and-blooms/bitset v1.12.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/bytedance/sonic v1.10.1 // indirect github.com/bytedance/sonic v1.10.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -36,7 +36,7 @@ require (
github.com/docker/docker v26.1.0+incompatible // indirect github.com/docker/docker v26.1.0+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-connections v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/c-kzg-4844 v1.0.2 // indirect github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
github.com/fjl/memsize v0.0.2 // indirect github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect github.com/gabriel-vasile/mimetype v1.4.2 // indirect
@@ -57,7 +57,7 @@ require (
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.4 // indirect github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.16 // indirect github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackc/pgx/v5 v5.5.4 // indirect github.com/jackc/pgx/v5 v5.5.4 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect
@@ -65,6 +65,7 @@ require (
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.4 // indirect github.com/klauspost/compress v1.17.4 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/leodido/go-urn v1.2.4 // indirect github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-isatty v0.0.20 // indirect
@@ -89,28 +90,29 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb // indirect github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect
github.com/sethvargo/go-retry v0.2.4 // indirect github.com/sethvargo/go-retry v0.2.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/status-im/keycard-go v0.2.0 // indirect github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.12 // indirect github.com/supranational/blst v0.3.11 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.8.0 // indirect github.com/tklauser/numcpus v0.6.1 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.5.0 // indirect golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.24.0 // indirect golang.org/x/crypto v0.19.0 // indirect
golang.org/x/net v0.25.0 // indirect golang.org/x/mod v0.16.0 // indirect
golang.org/x/sys v0.21.0 // indirect golang.org/x/net v0.20.0 // indirect
golang.org/x/text v0.16.0 // indirect golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.17.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/protobuf v1.33.0 // indirect google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect

View File

@@ -23,8 +23,8 @@ github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHG
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -61,6 +61,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0q
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI= github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc= github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -86,8 +87,8 @@ github.com/elastic/go-sysinfo v1.11.1 h1:g9mwl05njS4r69TisC+vwHWTSKywZFYYUu3so3T
github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E= github.com/elastic/go-sysinfo v1.11.1/go.mod h1:6KQb31j0QeWBDF88jIdWSxE8cwoOB9tO4Y4osN7Q70E=
github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s= github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -175,8 +176,8 @@ github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXei
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk= github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
@@ -308,12 +309,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA= github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ= github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
@@ -340,14 +339,14 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
@@ -370,8 +369,8 @@ github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd h1:dz
github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= github.com/ydb-platform/ydb-go-genproto v0.0.0-20231012155159-f85a672542fd/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA= github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2 h1:E0yUuuX7UmPxXm92+yQCjMveLFO3zfvYFIJVuAqsVRA=
github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2/go.mod h1:fjBLQ2TdQNl4bMjuWl9adoTGBypwUTPoGC+EqYqiIcU= github.com/ydb-platform/ydb-go-sdk/v3 v3.54.2/go.mod h1:fjBLQ2TdQNl4bMjuWl9adoTGBypwUTPoGC+EqYqiIcU=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
@@ -384,21 +383,21 @@ golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -415,18 +414,20 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=

View File

@@ -6,7 +6,6 @@ import (
"path/filepath" "path/filepath"
"scroll-tech/common/database" "scroll-tech/common/database"
"scroll-tech/common/utils"
) )
// FetcherConfig is the configuration of Layer1 or Layer2 fetcher. // FetcherConfig is the configuration of Layer1 or Layer2 fetcher.
@@ -31,8 +30,6 @@ type FetcherConfig struct {
GatewayRouterAddr string `json:"GatewayRouterAddr"` GatewayRouterAddr string `json:"GatewayRouterAddr"`
MessageQueueAddr string `json:"MessageQueueAddr"` MessageQueueAddr string `json:"MessageQueueAddr"`
BatchBridgeGatewayAddr string `json:"BatchBridgeGatewayAddr"` BatchBridgeGatewayAddr string `json:"BatchBridgeGatewayAddr"`
GasTokenGatewayAddr string `json:"GasTokenGatewayAddr"`
WrappedTokenGatewayAddr string `json:"WrappedTokenGatewayAddr"`
} }
// RedisConfig redis config // RedisConfig redis config
@@ -67,11 +64,5 @@ func NewConfig(file string) (*Config, error) {
return nil, err return nil, err
} }
// Override config with environment variables
err = utils.OverrideConfigWithEnv(cfg, "SCROLL_BRIDGE_HISTORY")
if err != nil {
return nil, err
}
return cfg, nil return cfg, nil
} }

View File

@@ -2,7 +2,7 @@ package logic
import ( import (
"context" "context"
"errors" "fmt"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
@@ -153,7 +153,7 @@ func (b *EventUpdateLogic) updateL2WithdrawMessageInfos(ctx context.Context, bat
if withdrawTrie.NextMessageNonce != l2WithdrawMessages[0].MessageNonce { if withdrawTrie.NextMessageNonce != l2WithdrawMessages[0].MessageNonce {
log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actual next message nonce", l2WithdrawMessages[0].MessageNonce) log.Error("nonce mismatch", "expected next message nonce", withdrawTrie.NextMessageNonce, "actual next message nonce", l2WithdrawMessages[0].MessageNonce)
return errors.New("nonce mismatch") return fmt.Errorf("nonce mismatch")
} }
messageHashes := make([]common.Hash, len(l2WithdrawMessages)) messageHashes := make([]common.Hash, len(l2WithdrawMessages))
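
The change above swaps between fmt.Errorf and errors.New for a fixed message. As general Go background rather than anything specific to this repository: errors.New is the idiomatic constructor when the message contains no format verbs, while fmt.Errorf earns its keep when values are interpolated or a sentinel is wrapped with %w. A minimal standalone sketch:

package main

import (
	"errors"
	"fmt"
)

// ErrNonceMismatch is a static sentinel; errors.New suffices because the
// message contains no formatting verbs.
var ErrNonceMismatch = errors.New("nonce mismatch")

// checkNonce adds context around the sentinel; fmt.Errorf is the right tool
// here because values are interpolated and the sentinel is wrapped with %w.
func checkNonce(expected, actual uint64) error {
	if expected != actual {
		return fmt.Errorf("expected nonce %d, got %d: %w", expected, actual, ErrNonceMismatch)
	}
	return nil
}

func main() {
	err := checkNonce(3, 4)
	fmt.Println(errors.Is(err, ErrNonceMismatch)) // prints: true
}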

View File

@@ -407,7 +407,7 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs []
return err return err
} }
} else { } else {
// The transactions are sorted, thus we set the score as their index. // The transactions are sorted, thus we set the score as their indices.
for _, tx := range txs { for _, tx := range txs {
txBytes, err := json.Marshal(tx) txBytes, err := json.Marshal(tx)
if err != nil { if err != nil {
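
The comment in this hunk describes caching already-sorted transactions with their slice index used as the sorted-set score. A minimal sketch of that pattern with the go-redis/v8 client listed in the module's go.mod; the key name and item type are placeholders, not the project's actual ones:

package cache

import (
	"context"
	"encoding/json"

	"github.com/go-redis/redis/v8"
)

// cacheOrdered stores already-sorted items in a Redis sorted set, using each
// item's slice index as its score so the original order is preserved when the
// set is later read back in score order (e.g. with ZRANGE).
func cacheOrdered(ctx context.Context, rdb *redis.Client, key string, txs []interface{}) error {
	for i, tx := range txs {
		b, err := json.Marshal(tx)
		if err != nil {
			return err
		}
		if err := rdb.ZAdd(ctx, key, &redis.Z{Score: float64(i), Member: b}).Err(); err != nil {
			return err
		}
	}
	return nil
}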

View File

@@ -168,14 +168,6 @@ func (e *L1EventParser) ParseL1SingleCrossChainEventLogs(ctx context.Context, lo
lastMessage.L2TokenAddress = event.L2Token.String() lastMessage.L2TokenAddress = event.L2Token.String()
lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs) lastMessage.TokenIDs = utils.ConvertBigIntArrayToString(event.TokenIDs)
lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts) lastMessage.TokenAmounts = utils.ConvertBigIntArrayToString(event.TokenAmounts)
case backendabi.L1DepositWrappedTokenSig:
event := backendabi.WrappedTokenMessageEvent{}
if err := utils.UnpackLog(backendabi.L1WrappedTokenGatewayABI, &event, "DepositWrappedToken", vlog); err != nil {
log.Error("Failed to unpack DepositWrappedToken event", "err", err)
return nil, nil, err
}
lastMessage := l1DepositMessages[len(l1DepositMessages)-1]
lastMessage.Sender = event.From.String()
case backendabi.L1SentMessageEventSig: case backendabi.L1SentMessageEventSig:
event := backendabi.L1SentMessageEvent{} event := backendabi.L1SentMessageEvent{}
if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil { if err := utils.UnpackLog(backendabi.IL1ScrollMessengerABI, &event, "SentMessage", vlog); err != nil {
@@ -328,16 +320,6 @@ func (e *L1EventParser) ParseL1MessageQueueEventLogs(logs []types.Log, l1Deposit
QueueIndex: index, QueueIndex: index,
}) })
} }
case backendabi.L1ResetDequeuedTransactionEventSig:
event := backendabi.L1ResetDequeuedTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "ResetDequeuedTransaction", vlog); err != nil {
log.Error("Failed to unpack ResetDequeuedTransaction event", "err", err)
return nil, err
}
l1MessageQueueEvents = append(l1MessageQueueEvents, &orm.MessageQueueEvent{
EventType: btypes.MessageQueueEventTypeResetDequeuedTransaction,
QueueIndex: event.StartIndex.Uint64(),
})
case backendabi.L1DropTransactionEventSig: case backendabi.L1DropTransactionEventSig:
event := backendabi.L1DropTransactionEvent{} event := backendabi.L1DropTransactionEvent{}
if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DropTransaction", vlog); err != nil { if err := utils.UnpackLog(backendabi.IL1MessageQueueABI, &event, "DropTransaction", vlog); err != nil {

View File

@@ -51,8 +51,11 @@ type L1FetcherLogic struct {
// NewL1FetcherLogic creates L1 fetcher logic // NewL1FetcherLogic creates L1 fetcher logic
func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic { func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient.Client) *L1FetcherLogic {
addressList := []common.Address{ addressList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr), common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr), common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr), common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr), common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -66,8 +69,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
} }
gatewayList := []common.Address{ gatewayList := []common.Address{
common.HexToAddress(cfg.ETHGatewayAddr),
common.HexToAddress(cfg.StandardERC20GatewayAddr), common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr), common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr), common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr), common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -99,26 +105,6 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr)) gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
} }
if common.HexToAddress(cfg.ETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.ETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.ETHGatewayAddr))
}
if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
}
if common.HexToAddress(cfg.GasTokenGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.GasTokenGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.GasTokenGatewayAddr))
}
if common.HexToAddress(cfg.WrappedTokenGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WrappedTokenGatewayAddr))
}
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList) log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L1FetcherLogic{ f := &L1FetcherLogic{
@@ -224,7 +210,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
Topics: make([][]common.Hash, 1), Topics: make([][]common.Hash, 1),
} }
query.Topics[0] = make([]common.Hash, 16) query.Topics[0] = make([]common.Hash, 14)
query.Topics[0][0] = backendabi.L1DepositETHSig query.Topics[0][0] = backendabi.L1DepositETHSig
query.Topics[0][1] = backendabi.L1DepositERC20Sig query.Topics[0][1] = backendabi.L1DepositERC20Sig
query.Topics[0][2] = backendabi.L1DepositERC721Sig query.Topics[0][2] = backendabi.L1DepositERC721Sig
@@ -238,9 +224,7 @@ func (f *L1FetcherLogic) l1FetcherLogs(ctx context.Context, from, to uint64) ([]
query.Topics[0][10] = backendabi.L1QueueTransactionEventSig query.Topics[0][10] = backendabi.L1QueueTransactionEventSig
query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig query.Topics[0][11] = backendabi.L1DequeueTransactionEventSig
query.Topics[0][12] = backendabi.L1DropTransactionEventSig query.Topics[0][12] = backendabi.L1DropTransactionEventSig
query.Topics[0][13] = backendabi.L1ResetDequeuedTransactionEventSig query.Topics[0][13] = backendabi.L1BridgeBatchDepositSig
query.Topics[0][14] = backendabi.L1BridgeBatchDepositSig
query.Topics[0][15] = backendabi.L1DepositWrappedTokenSig
eventLogs, err := f.client.FilterLogs(ctx, query) eventLogs, err := f.client.FilterLogs(ctx, query)
if err != nil { if err != nil {
@@ -355,10 +339,6 @@ func (f *L1FetcherLogic) updateMetrics(res L1FilterResult) {
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_skip_message").Add(1) f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_skip_message").Add(1)
case btypes.MessageQueueEventTypeDropTransaction: case btypes.MessageQueueEventTypeDropTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_drop_message").Add(1) f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_drop_message").Add(1)
// one ResetDequeuedTransaction event could indicate reset multiple skipped messages,
// this metric only counts the number of events, not the number of skipped messages.
case btypes.MessageQueueEventTypeResetDequeuedTransaction:
f.l1FetcherLogicFetchedTotal.WithLabelValues("L1_reset_skipped_messages").Add(1)
} }
} }
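
The hunk above changes how many event signatures are packed into Topics[0]. The query shape itself is a standard eth_getLogs filter: Topics[0] acts as an OR-list of event signature hashes, so a single FilterLogs call returns any of the listed events emitted by the watched addresses. A hedged sketch using the upstream go-ethereum packages (the repository uses the scroll-tech fork, which keeps the same API); the event signature strings are illustrative, not the exact contract ABI:

package fetcher

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient"
)

// fetchBridgeLogs mirrors the shape of the query built in l1FetcherLogs:
// Topics[0] is an OR-list of event signature hashes shared across all
// watched addresses in the same filter.
func fetchBridgeLogs(ctx context.Context, client *ethclient.Client, addrs []common.Address, from, to uint64) ([]types.Log, error) {
	sigs := []common.Hash{
		crypto.Keccak256Hash([]byte("DepositETH(address,address,uint256,bytes)")),
		crypto.Keccak256Hash([]byte("SentMessage(address,address,uint256,uint256,uint256,bytes)")),
	}
	query := ethereum.FilterQuery{
		FromBlock: new(big.Int).SetUint64(from),
		ToBlock:   new(big.Int).SetUint64(to),
		Addresses: addrs,
		Topics:    [][]common.Hash{sigs},
	}
	return client.FilterLogs(ctx, query)
}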

View File

@@ -54,6 +54,7 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.StandardERC20GatewayAddr), common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr), common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr), common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr), common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -67,6 +68,7 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
common.HexToAddress(cfg.StandardERC20GatewayAddr), common.HexToAddress(cfg.StandardERC20GatewayAddr),
common.HexToAddress(cfg.CustomERC20GatewayAddr), common.HexToAddress(cfg.CustomERC20GatewayAddr),
common.HexToAddress(cfg.WETHGatewayAddr),
common.HexToAddress(cfg.DAIGatewayAddr), common.HexToAddress(cfg.DAIGatewayAddr),
common.HexToAddress(cfg.ERC721GatewayAddr), common.HexToAddress(cfg.ERC721GatewayAddr),
@@ -98,11 +100,6 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr)) gatewayList = append(gatewayList, common.HexToAddress(cfg.BatchBridgeGatewayAddr))
} }
if common.HexToAddress(cfg.WETHGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.WETHGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.WETHGatewayAddr))
}
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList) log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L2FetcherLogic{ f := &L2FetcherLogic{
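
Both fetcher constructors treat several gateways as optional, including them in the watch lists only when the configured address is non-zero. A small sketch of that guard; the helper name is made up for illustration:

package fetcher

import "github.com/ethereum/go-ethereum/common"

// appendIfConfigured adds an address to the watch lists only when the config
// value parses to something other than the zero address, mirroring the
// conditional blocks in the fetcher constructors above.
func appendIfConfigured(addressList, gatewayList []common.Address, hexAddr string) ([]common.Address, []common.Address) {
	addr := common.HexToAddress(hexAddr)
	if addr == (common.Address{}) {
		return addressList, gatewayList // unset or zero address: skip this gateway
	}
	return append(addressList, addr), append(gatewayList, addr)
}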

View File

@@ -217,12 +217,6 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex) db = db.Where("message_nonce = ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage) db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeDropped txStatusUpdateFields["tx_status"] = types.TxStatusTypeDropped
case btypes.MessageQueueEventTypeResetDequeuedTransaction:
db = db.Where("tx_status = ?", types.TxStatusTypeSkipped)
// reset skipped messages that the nonce is greater than or equal to the queue index.
db = db.Where("message_nonce >= ?", l1MessageQueueEvent.QueueIndex)
db = db.Where("message_type = ?", btypes.MessageTypeL1SentMessage)
txStatusUpdateFields["tx_status"] = types.TxStatusTypeSent
} }
if err := db.Updates(txStatusUpdateFields).Error; err != nil { if err := db.Updates(txStatusUpdateFields).Error; err != nil {
return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err) return fmt.Errorf("failed to update tx statuses of L1 message queue events, update fields: %v, error: %w", txStatusUpdateFields, err)
@@ -236,7 +230,7 @@ func (c *CrossMessage) UpdateL1MessageQueueEventsInfo(ctx context.Context, l1Mes
db = db.Model(&CrossMessage{}) db = db.Model(&CrossMessage{})
txHashUpdateFields := make(map[string]interface{}) txHashUpdateFields := make(map[string]interface{})
switch l1MessageQueueEvent.EventType { switch l1MessageQueueEvent.EventType {
case btypes.MessageQueueEventTypeDequeueTransaction, btypes.MessageQueueEventTypeResetDequeuedTransaction: case btypes.MessageQueueEventTypeDequeueTransaction:
continue continue
case btypes.MessageQueueEventTypeQueueTransaction: case btypes.MessageQueueEventTypeQueueTransaction:
// only replayMessages or enforced txs (whose message hashes would not be found), sendMessages have been filtered out. // only replayMessages or enforced txs (whose message hashes would not be found), sendMessages have been filtered out.
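
The case that differs above follows the same pattern as the surrounding ones: chain Where clauses onto a *gorm.DB and apply a map of column updates in a single UPDATE. A trimmed sketch with a stand-in model; the column names follow the hunk, while the numeric status codes are placeholders rather than the repository's values:

package orm

import "gorm.io/gorm"

// CrossMessage is a trimmed-down stand-in for the ORM model in the hunk above.
type CrossMessage struct {
	ID           uint64 `gorm:"primaryKey"`
	MessageNonce uint64
	MessageType  int
	TxStatus     int
}

// markDropped chains Where clauses onto one *gorm.DB and applies a map of
// column updates in a single UPDATE statement.
func markDropped(db *gorm.DB, nonce uint64) error {
	updates := map[string]interface{}{"tx_status": 4} // placeholder status code
	return db.Model(&CrossMessage{}).
		Where("message_nonce = ?", nonce).
		Where("message_type = ?", 1). // placeholder for the L1 sent-message type
		Updates(updates).Error
}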

View File

@@ -70,7 +70,6 @@ const (
MessageQueueEventTypeQueueTransaction MessageQueueEventTypeQueueTransaction
MessageQueueEventTypeDequeueTransaction MessageQueueEventTypeDequeueTransaction
MessageQueueEventTypeDropTransaction MessageQueueEventTypeDropTransaction
MessageQueueEventTypeResetDequeuedTransaction
) )
// BatchStatusType represents the type of batch status. // BatchStatusType represents the type of batch status.
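
The constant that differs here is the final member of the iota block, so the numeric values of the remaining event types are identical on both sides of the diff. An abbreviated illustration; the leading value and anything declared before QueueTransaction are assumptions, since the top of the const block is outside the hunk:

package btypes

// MessageQueueEventType is abbreviated here; the leading "Unknown" value is an
// assumption for illustration. Because ResetDequeuedTransaction sits at the end
// of the iota block, dropping it does not shift the values of the earlier
// constants.
type MessageQueueEventType int

const (
	MessageQueueEventTypeUnknown MessageQueueEventType = iota
	MessageQueueEventTypeQueueTransaction
	MessageQueueEventTypeDequeueTransaction
	MessageQueueEventTypeDropTransaction
	// MessageQueueEventTypeResetDequeuedTransaction // present on only one side of this diff
)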

View File

@@ -38,7 +38,7 @@ func GetBlockNumber(ctx context.Context, client *ethclient.Client, confirmations
// @todo: add unit test. // @todo: add unit test.
func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error { func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.Events[event].ID { if log.Topics[0] != c.Events[event].ID {
return errors.New("event signature mismatch") return fmt.Errorf("event signature mismatch")
} }
if len(log.Data) > 0 { if len(log.Data) > 0 {
if err := c.UnpackIntoInterface(out, event, log.Data); err != nil { if err := c.UnpackIntoInterface(out, event, log.Data); err != nil {
@@ -66,55 +66,32 @@ func ComputeMessageHash(
return common.BytesToHash(crypto.Keccak256(data)) return common.BytesToHash(crypto.Keccak256(data))
} }
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
// GetBatchRangeFromCalldata find the block range from calldata, both inclusive. // GetBatchRangeFromCalldata find the block range from calldata, both inclusive.
func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) { func GetBatchRangeFromCalldata(calldata []byte) (uint64, uint64, error) {
const methodIDLength = 4 method := backendabi.IScrollChainABI.Methods["commitBatch"]
if len(txData) < methodIDLength { values, err := method.Inputs.Unpack(calldata[4:])
return 0, 0, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
}
method, err := backendabi.IScrollChainABI.MethodById(txData[:methodIDLength])
if err != nil { if err != nil {
return 0, 0, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) // special case: import genesis batch
method = backendabi.IScrollChainABI.Methods["importGenesisBatch"]
_, err2 := method.Inputs.Unpack(calldata[4:])
if err2 == nil {
// genesis batch
return 0, 0, nil
}
// none of "commitBatch" and "importGenesisBatch" match, give up
return 0, 0, err
} }
values, err := method.Inputs.Unpack(txData[methodIDLength:]) args := commitBatchArgs{}
err = method.Inputs.Copy(&args, values)
if err != nil { if err != nil {
return 0, 0, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) return 0, 0, err
}
var chunks [][]byte
if method.Name == "importGenesisBatch" {
return 0, 0, nil
} else if method.Name == "commitBatch" {
type commitBatchArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
}
var args commitBatchArgs
if err = method.Inputs.Copy(&args, values); err != nil {
return 0, 0, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
}
chunks = args.Chunks
} else if method.Name == "commitBatchWithBlobProof" {
type commitBatchWithBlobProofArgs struct {
Version uint8
ParentBatchHeader []byte
Chunks [][]byte
SkippedL1MessageBitmap []byte
BlobDataProof []byte
}
var args commitBatchWithBlobProofArgs
if err = method.Inputs.Copy(&args, values); err != nil {
return 0, 0, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
}
chunks = args.Chunks
} }
var startBlock uint64 var startBlock uint64
@@ -123,14 +100,14 @@ func GetBatchRangeFromCalldata(txData []byte) (uint64, uint64, error) {
// decode blocks from chunk and assume that there's no empty chunk // decode blocks from chunk and assume that there's no empty chunk
// | 1 byte | 60 bytes | ... | 60 bytes | // | 1 byte | 60 bytes | ... | 60 bytes |
// | num blocks | block 1 | ... | block n | // | num blocks | block 1 | ... | block n |
if len(chunks) == 0 { if len(args.Chunks) == 0 {
return 0, 0, errors.New("invalid chunks") return 0, 0, errors.New("invalid chunks")
} }
chunk := chunks[0] chunk := args.Chunks[0]
block := chunk[1:61] // first block in chunk block := chunk[1:61] // first block in chunk
startBlock = binary.BigEndian.Uint64(block[0:8]) startBlock = binary.BigEndian.Uint64(block[0:8])
chunk = chunks[len(chunks)-1] chunk = args.Chunks[len(args.Chunks)-1]
lastBlockIndex := int(chunk[0]) - 1 lastBlockIndex := int(chunk[0]) - 1
block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in chunk
finishBlock = binary.BigEndian.Uint64(block[0:8]) finishBlock = binary.BigEndian.Uint64(block[0:8])
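
One side of this GetBatchRangeFromCalldata hunk resolves the called method from the 4-byte selector and copies the unpacked values into a typed struct instead of assuming commitBatch. A hedged, self-contained sketch of that go-ethereum ABI pattern, assuming a parsed abi.ABI that contains a commitBatch method with the inputs shown above:

package utils

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

// chunksFromCommitCalldata resolves the called method from the 4-byte selector
// and copies the unpacked arguments into a struct whose fields mirror the
// commitBatch inputs shown in the hunk above; error handling is simplified.
func chunksFromCommitCalldata(contractABI *abi.ABI, txData []byte) ([][]byte, error) {
	const methodIDLength = 4
	if len(txData) < methodIDLength {
		return nil, fmt.Errorf("calldata too short: %d bytes", len(txData))
	}
	method, err := contractABI.MethodById(txData[:methodIDLength])
	if err != nil {
		return nil, fmt.Errorf("unknown method selector %x: %w", txData[:methodIDLength], err)
	}
	values, err := method.Inputs.Unpack(txData[methodIDLength:])
	if err != nil {
		return nil, err
	}
	// Field names must match the ABI argument names for Copy to fill them.
	var args struct {
		Version                uint8
		ParentBatchHeader      []byte
		Chunks                 [][]byte
		SkippedL1MessageBitmap []byte
	}
	if err := method.Inputs.Copy(&args, values); err != nil {
		return nil, err
	}
	return args.Chunks, nil
}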

File diff suppressed because one or more lines are too long

View File

@@ -17,7 +17,7 @@ RUN --mount=target=. \
FROM ubuntu:20.04 FROM ubuntu:20.04
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl" ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl -y
COPY --from=builder /bin/bridgehistoryapi-api /bin/ COPY --from=builder /bin/bridgehistoryapi-api /bin/
WORKDIR /app WORKDIR /app
ENTRYPOINT ["bridgehistoryapi-api"] ENTRYPOINT ["bridgehistoryapi-api"]

View File

@@ -17,8 +17,7 @@ RUN --mount=target=. \
FROM ubuntu:20.04 FROM ubuntu:20.04
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl" ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install ca-certificates vim netcat-openbsd net-tools curl -y
RUN update-ca-certificates
COPY --from=builder /bin/bridgehistoryapi-fetcher /bin/ COPY --from=builder /bin/bridgehistoryapi-fetcher /bin/
WORKDIR /app WORKDIR /app
ENTRYPOINT ["bridgehistoryapi-fetcher"] ENTRYPOINT ["bridgehistoryapi-fetcher"]


@@ -40,7 +40,6 @@ FROM ubuntu:20.04
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/lib
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl" ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
# ENV CHAIN_ID=534353 # ENV CHAIN_ID=534353
RUN apt update && apt install vim netcat-openbsd net-tools curl jq -y
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/coordinator_api /bin/ COPY --from=builder /bin/coordinator_api /bin/


@@ -19,8 +19,9 @@ RUN --mount=target=. \
# Pull coordinator into a second stage deploy ubuntu container # Pull coordinator into a second stage deploy ubuntu container
FROM ubuntu:20.04 FROM ubuntu:20.04
ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl" ENV CGO_LDFLAGS="-Wl,--no-as-needed -ldl"
RUN apt update && apt install vim netcat-openbsd net-tools curl -y
COPY --from=builder /bin/coordinator_cron /bin/ COPY --from=builder /bin/coordinator_cron /bin/
WORKDIR /app WORKDIR /app
ENTRYPOINT ["coordinator_cron"] ENTRYPOINT ["coordinator_cron"]


@@ -21,8 +21,6 @@ RUN --mount=target=. \
# Pull gas_oracle into a second stage deploy ubuntu container # Pull gas_oracle into a second stage deploy ubuntu container
FROM ubuntu:20.04 FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl" ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/gas_oracle /bin/ COPY --from=builder /bin/gas_oracle /bin/


@@ -21,8 +21,6 @@ RUN --mount=target=. \
# Pull rollup_relayer into a second stage deploy ubuntu container # Pull rollup_relayer into a second stage deploy ubuntu container
FROM ubuntu:20.04 FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl" ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/rollup_relayer /bin/ COPY --from=builder /bin/rollup_relayer /bin/


@@ -1,56 +1,89 @@
package forks package forks
import ( import (
"math"
"math/big" "math/big"
"sort"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/params"
) )
// GetHardforkName returns the name of the hardfork active at the given block height and timestamp. // CollectSortedForkHeights returns a sorted set of block numbers at which one or more forks are activated
// It checks the chain configuration to determine which hardfork is active. func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]bool, map[string]uint64) {
func GetHardforkName(config *params.ChainConfig, blockHeight, blockTimestamp uint64) string { type nameFork struct {
if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) { name string
return "homestead" block *big.Int
} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) {
return "bernoulli"
} else if !config.IsDarwin(blockTimestamp) {
return "curie"
} else if !config.IsDarwinV2(blockTimestamp) {
return "darwin"
} else {
return "darwinV2"
} }
forkHeightNameMap := make(map[uint64]string)
for _, fork := range []nameFork{
{name: "homestead", block: config.HomesteadBlock},
{name: "daoFork", block: config.DAOForkBlock},
{name: "eip150", block: config.EIP150Block},
{name: "eip155", block: config.EIP155Block},
{name: "eip158", block: config.EIP158Block},
{name: "byzantium", block: config.ByzantiumBlock},
{name: "constantinople", block: config.ConstantinopleBlock},
{name: "petersburg", block: config.PetersburgBlock},
{name: "istanbul", block: config.IstanbulBlock},
{name: "muirGlacier", block: config.MuirGlacierBlock},
{name: "berlin", block: config.BerlinBlock},
{name: "london", block: config.LondonBlock},
{name: "arrowGlacier", block: config.ArrowGlacierBlock},
{name: "archimedes", block: config.ArchimedesBlock},
{name: "shanghai", block: config.ShanghaiBlock},
{name: "bernoulli", block: config.BernoulliBlock},
{name: "curie", block: config.CurieBlock},
} {
if fork.block == nil {
continue
}
height := fork.block.Uint64()
// only keep the latest fork at each height, discard the rest
forkHeightNameMap[height] = fork.name
}
forkHeightsMap := make(map[uint64]bool)
forkNameHeightMap := make(map[string]uint64)
for height, name := range forkHeightNameMap {
forkHeightsMap[height] = true
forkNameHeightMap[name] = height
}
var forkHeights []uint64
for height := range forkHeightsMap {
forkHeights = append(forkHeights, height)
}
sort.Slice(forkHeights, func(i, j int) bool {
return forkHeights[i] < forkHeights[j]
})
return forkHeights, forkHeightsMap, forkNameHeightMap
} }
// GetCodecVersion returns the encoding codec version for the given block height and timestamp. // BlocksUntilFork returns the number of blocks until the next fork
// It determines the appropriate codec version based on the active hardfork. // returns 0 if there is no fork scheduled for the future
func GetCodecVersion(config *params.ChainConfig, blockHeight, blockTimestamp uint64) encoding.CodecVersion { func BlocksUntilFork(blockHeight uint64, forkHeights []uint64) uint64 {
if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) { for _, forkHeight := range forkHeights {
return encoding.CodecV0 if forkHeight > blockHeight {
} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) { return forkHeight - blockHeight
return encoding.CodecV1 }
} else if !config.IsDarwin(blockTimestamp) {
return encoding.CodecV2
} else if !config.IsDarwinV2(blockTimestamp) {
return encoding.CodecV3
} else {
return encoding.CodecV4
} }
return 0
} }
// GetMaxChunksPerBatch returns the maximum number of chunks allowed per batch for the given block height and timestamp. // BlockRange returns the block range of the hard fork
// This value may change depending on the active hardfork. // The caller must ensure forkHeights is in increasing order
func GetMaxChunksPerBatch(config *params.ChainConfig, blockHeight, blockTimestamp uint64) uint64 { func BlockRange(currentForkHeight uint64, forkHeights []uint64) (from, to uint64) {
if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) { to = math.MaxInt64
return 15 for _, height := range forkHeights {
} else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) { if currentForkHeight < height {
return 15 to = height
} else if !config.IsDarwin(blockTimestamp) { return
return 45 }
} else if !config.IsDarwinV2(blockTimestamp) { from = height
return 45
} else {
return 45
} }
return
} }
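
For readers comparing the two sides of this hunk: the new helpers replace the generic fork-height bookkeeping with a cascading check per block, where height-activated forks (bernoulli, curie) are tested by block number and the later forks (darwin, darwinV2) by block timestamp. The self-contained Go sketch below mirrors that selection; the forkConfig type and its fields are invented for illustration, while the real helpers read *params.ChainConfig from scroll-tech/go-ethereum.

package main

import "fmt"

// forkConfig is a hypothetical stand-in for the chain configuration.
type forkConfig struct {
	bernoulliBlock uint64 // activation heights
	curieBlock     uint64
	darwinTime     uint64 // activation timestamps
	darwinV2Time   uint64
}

// hardforkName picks the active fork name: earlier forks by height, later ones by timestamp.
func hardforkName(c forkConfig, height, timestamp uint64) string {
	switch {
	case height < c.bernoulliBlock:
		return "homestead"
	case height < c.curieBlock:
		return "bernoulli"
	case timestamp < c.darwinTime:
		return "curie"
	case timestamp < c.darwinV2Time:
		return "darwin"
	default:
		return "darwinV2"
	}
}

func main() {
	cfg := forkConfig{bernoulliBlock: 100, curieBlock: 200, darwinTime: 1_700_000_000, darwinV2Time: 1_710_000_000}
	fmt.Println(hardforkName(cfg, 150, 0))             // bernoulli
	fmt.Println(hardforkName(cfg, 250, 1_705_000_000)) // darwin
}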

common/forks/forks_test.go (new file, 142 lines)

@@ -0,0 +1,142 @@
package forks
import (
"math"
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/require"
)
func TestCollectSortedForkBlocks(t *testing.T) {
l, m, n := CollectSortedForkHeights(&params.ChainConfig{
ArchimedesBlock: big.NewInt(0),
ShanghaiBlock: big.NewInt(3),
BernoulliBlock: big.NewInt(3),
CurieBlock: big.NewInt(4),
})
require.Equal(t, l, []uint64{
0,
3,
4,
})
require.Equal(t, map[uint64]bool{
3: true,
4: true,
0: true,
}, m)
require.Equal(t, map[string]uint64{
"archimedes": 0,
"bernoulli": 3,
"curie": 4,
}, n)
}
func TestBlocksUntilFork(t *testing.T) {
tests := map[string]struct {
block uint64
forks []uint64
expected uint64
}{
"NoFork": {
block: 44,
forks: []uint64{},
expected: 0,
},
"BeforeFork": {
block: 0,
forks: []uint64{1, 5},
expected: 1,
},
"OnFork": {
block: 1,
forks: []uint64{1, 5},
expected: 4,
},
"OnLastFork": {
block: 5,
forks: []uint64{1, 5},
expected: 0,
},
"AfterFork": {
block: 5,
forks: []uint64{1, 5},
expected: 0,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
require.Equal(t, test.expected, BlocksUntilFork(test.block, test.forks))
})
}
}
func TestBlockRange(t *testing.T) {
tests := []struct {
name string
forkHeight uint64
forkHeights []uint64
expectedFrom uint64
expectedTo uint64
}{
{
name: "ToInfinite",
forkHeight: 300,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 300,
expectedTo: math.MaxInt64,
},
{
name: "To300",
forkHeight: 200,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 200,
expectedTo: 300,
},
{
name: "To200",
forkHeight: 100,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 100,
expectedTo: 200,
},
{
name: "To100",
forkHeight: 0,
forkHeights: []uint64{100, 200, 300},
expectedFrom: 0,
expectedTo: 100,
},
{
name: "To200-1",
forkHeight: 100,
forkHeights: []uint64{100, 200},
expectedFrom: 100,
expectedTo: 200,
},
{
name: "To2",
forkHeight: 1,
forkHeights: []uint64{1, 2},
expectedFrom: 1,
expectedTo: 2,
},
{
name: "ToInfinite-1",
forkHeight: 0,
forkHeights: []uint64{0},
expectedFrom: 0,
expectedTo: math.MaxInt64,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
from, to := BlockRange(test.forkHeight, test.forkHeights)
require.Equal(t, test.expectedFrom, from)
require.Equal(t, test.expectedTo, to)
})
}
}


@@ -4,7 +4,7 @@ go 1.21
require ( require (
github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/semver/v3 v3.2.1
github.com/bits-and-blooms/bitset v1.13.0 github.com/bits-and-blooms/bitset v1.12.0
github.com/docker/docker v26.1.0+incompatible github.com/docker/docker v26.1.0+incompatible
github.com/gin-contrib/pprof v1.4.0 github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/gin v1.9.1 github.com/gin-gonic/gin v1.9.1
@@ -13,8 +13,7 @@ require (
github.com/modern-go/reflect2 v1.0.2 github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0 github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.19.0 github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
github.com/stretchr/testify v1.9.0 github.com/stretchr/testify v1.9.0
github.com/testcontainers/testcontainers-go v0.30.0 github.com/testcontainers/testcontainers-go v0.30.0
github.com/testcontainers/testcontainers-go/modules/compose v0.30.0 github.com/testcontainers/testcontainers-go/modules/compose v0.30.0
@@ -78,7 +77,7 @@ require (
github.com/docker/go-units v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/ethereum/c-kzg-4844 v1.0.2 // indirect github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fjl/memsize v0.0.2 // indirect github.com/fjl/memsize v0.0.2 // indirect
github.com/fsnotify/fsevents v0.1.1 // indirect github.com/fsnotify/fsevents v0.1.1 // indirect
@@ -120,7 +119,7 @@ require (
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.4 // indirect github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.16 // indirect github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/imdario/mergo v0.3.16 // indirect github.com/imdario/mergo v0.3.16 // indirect
github.com/in-toto/in-toto-golang v0.5.0 // indirect github.com/in-toto/in-toto-golang v0.5.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
@@ -183,7 +182,7 @@ require (
github.com/rjeczalik/notify v0.9.1 // indirect github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rs/cors v1.7.0 // indirect github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect
github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect
@@ -195,12 +194,12 @@ require (
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.4.0 // indirect github.com/spf13/viper v1.4.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.12 // indirect github.com/supranational/blst v0.3.11 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.8.0 // indirect github.com/tklauser/numcpus v0.6.1 // indirect
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 // indirect github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect
@@ -211,7 +210,7 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.45.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
@@ -230,17 +229,17 @@ require (
go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/mock v0.4.0 // indirect go.uber.org/mock v0.4.0 // indirect
golang.org/x/arch v0.5.0 // indirect golang.org/x/arch v0.5.0 // indirect
golang.org/x/crypto v0.24.0 // indirect golang.org/x/crypto v0.19.0 // indirect
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
golang.org/x/mod v0.17.0 // indirect golang.org/x/mod v0.16.0 // indirect
golang.org/x/net v0.25.0 // indirect golang.org/x/net v0.20.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/oauth2 v0.16.0 // indirect
golang.org/x/sync v0.7.0 // indirect golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.21.0 // indirect golang.org/x/sys v0.17.0 // indirect
golang.org/x/term v0.21.0 // indirect golang.org/x/term v0.17.0 // indirect
golang.org/x/text v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect golang.org/x/tools v0.17.0 // indirect
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect


@@ -70,8 +70,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
@@ -212,8 +212,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s= github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA=
@@ -384,8 +384,8 @@ github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXei
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk= github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
@@ -633,12 +633,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb h1:uOKdmDT0LsuS3gfynEjR4zA3Ooh6p2Z3O+IMRj2r8LA= github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/da-codec v0.0.0-20240730031611-1b736159d5cb/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ= github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ= github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=
@@ -702,8 +700,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E= github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E=
@@ -716,12 +714,10 @@ github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4D
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw= github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA= github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375/go.mod h1:xRroudyp5iVtxKqZCrA6n2TLFRBf8bmnjr1UD4x+z7g= github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375/go.mod h1:xRroudyp5iVtxKqZCrA6n2TLFRBf8bmnjr1UD4x+z7g=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 h1:ZT8ibgassurSISJ1Pj26NsM3vY2jxFZn63Nd/TpHmRw= github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302 h1:ZT8ibgassurSISJ1Pj26NsM3vY2jxFZn63Nd/TpHmRw=
github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302/go.mod h1:9kMVqMyQ/Sx2df5LtnGG+nbrmiZzCS7V6gjW3oGHsvI= github.com/tonistiigi/fsutil v0.0.0-20230825212630-f09800878302/go.mod h1:9kMVqMyQ/Sx2df5LtnGG+nbrmiZzCS7V6gjW3oGHsvI=
@@ -758,9 +754,8 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300 h1:DZH5n7L3L8RxKdSyJHZt7WePgwdhHnPhQFdQSJaHF+o= github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300 h1:DZH5n7L3L8RxKdSyJHZt7WePgwdhHnPhQFdQSJaHF+o=
github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300/go.mod h1:mOd4yUMgn2fe2nV9KXsa9AyQBFZGzygVPovsZR+Rl5w= github.com/zmap/zcrypto v0.0.0-20230310154051-c8b263fd8300/go.mod h1:mOd4yUMgn2fe2nV9KXsa9AyQBFZGzygVPovsZR+Rl5w=
github.com/zmap/zlint/v3 v3.5.0 h1:Eh2B5t6VKgVH0DFmTwOqE50POvyDhUaU9T2mJOe1vfQ= github.com/zmap/zlint/v3 v3.5.0 h1:Eh2B5t6VKgVH0DFmTwOqE50POvyDhUaU9T2mJOe1vfQ=
@@ -820,8 +815,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
@@ -831,8 +826,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -851,8 +846,8 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
@@ -864,8 +859,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -906,21 +901,21 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -935,8 +930,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=


@@ -6,8 +6,6 @@ export RUST_BACKTRACE=full
export RUST_LOG=debug export RUST_LOG=debug
export RUST_MIN_STACK=100000000 export RUST_MIN_STACK=100000000
export PROVER_OUTPUT_DIR=test_zkp_test export PROVER_OUTPUT_DIR=test_zkp_test
export SCROLL_PROVER_ASSETS_DIR=/assets/test_assets
export DARWIN_V2_TEST_DIR=/assets
#export LD_LIBRARY_PATH=/:/usr/local/cuda/lib64 #export LD_LIBRARY_PATH=/:/usr/local/cuda/lib64
mkdir -p $PROVER_OUTPUT_DIR mkdir -p $PROVER_OUTPUT_DIR
@@ -15,16 +13,32 @@ mkdir -p $PROVER_OUTPUT_DIR
REPO=$(realpath ../..) REPO=$(realpath ../..)
function build_test_bins() { function build_test_bins() {
cd impl
cargo build --release
ln -f -s $(realpath target/release/libzkp.so) $REPO/prover/core/lib
ln -f -s $(realpath target/release/libzkp.so) $REPO/coordinator/internal/logic/verifier/lib
cd $REPO/prover cd $REPO/prover
make tests_binary go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
cd $REPO/coordinator cd $REPO/coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd $REPO/common/libzkp cd $REPO/common/libzkp
} }
function build_test_bins_old() {
cd $REPO
cd prover
make libzkp
go test -tags="gpu ffi" -timeout 0 -c core/prover_test.go
cd ..
cd coordinator
make libzkp
go test -tags="gpu ffi" -timeout 0 -c ./internal/logic/verifier
cd ..
cd common/libzkp
}
build_test_bins build_test_bins
rm -rf $PROVER_OUTPUT_DIR/* #rm -rf test_zkp_test/*
#rm -rf prover.log verifier.log #rm -rf prover.log verifier.log
$REPO/prover/prover.test --exact zk_circuits_handler::darwin_v2::tests::test_circuits 2>&1 | tee prover.log #$REPO/prover/core.test -test.v 2>&1 | tee prover.log
$REPO/coordinator/verifier.test -test.v 2>&1 | tee verifier.log $REPO/coordinator/verifier.test -test.v 2>&1 | tee verifier.log


@@ -30,8 +30,8 @@ dependencies = [
[[package]] [[package]]
name = "aggregator" name = "aggregator"
version = "0.12.0" version = "0.11.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.2#6f7b46a3b1ccf9dc448735e8455e1ac6f9e30643" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
dependencies = [ dependencies = [
"ark-std 0.3.0", "ark-std 0.3.0",
"bitstream-io", "bitstream-io",
@@ -39,9 +39,9 @@ dependencies = [
"ctor", "ctor",
"encoder", "encoder",
"env_logger 0.10.0", "env_logger 0.10.0",
"eth-types 0.12.0", "eth-types",
"ethers-core", "ethers-core",
"gadgets 0.12.0", "gadgets",
"halo2-base", "halo2-base",
"halo2-ecc", "halo2-ecc",
"halo2_proofs", "halo2_proofs",
@@ -59,41 +59,7 @@ dependencies = [
"snark-verifier-sdk", "snark-verifier-sdk",
"strum 0.25.0", "strum 0.25.0",
"strum_macros 0.25.3", "strum_macros 0.25.3",
"zkevm-circuits 0.12.0", "zkevm-circuits",
]
[[package]]
name = "aggregator"
version = "0.13.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.1#4009e5593f13ba73f64f556011ee5ef47bc4ebf3"
dependencies = [
"ark-std 0.3.0",
"bitstream-io",
"c-kzg",
"ctor",
"encoder",
"env_logger 0.10.0",
"eth-types 0.13.0",
"ethers-core",
"gadgets 0.13.0",
"halo2-base",
"halo2-ecc",
"halo2_proofs",
"hex",
"itertools 0.11.0",
"log",
"num-bigint",
"once_cell",
"rand",
"revm-precompile",
"revm-primitives",
"serde",
"serde_json",
"snark-verifier",
"snark-verifier-sdk",
"strum 0.25.0",
"strum_macros 0.25.3",
"zkevm-circuits 0.13.0",
] ]
[[package]] [[package]]
@@ -171,9 +137,9 @@ dependencies = [
[[package]] [[package]]
name = "anyhow" name = "anyhow"
version = "1.0.86" version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" checksum = "3b13c32d80ecc7ab747b80c3784bce54ee8a7a0cc4fbda9bf4cda2cf6fe90854"
[[package]] [[package]]
name = "arc-swap" name = "arc-swap"
@@ -570,47 +536,24 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]] [[package]]
name = "bus-mapping" name = "bus-mapping"
version = "0.12.0" version = "0.11.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.2#6f7b46a3b1ccf9dc448735e8455e1ac6f9e30643" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
dependencies = [ dependencies = [
"eth-types 0.12.0", "eth-types",
"ethers-core", "ethers-core",
"ethers-providers", "ethers-providers",
"ethers-signers", "ethers-signers",
"gadgets 0.12.0", "external-tracer",
"gadgets",
"halo2_proofs", "halo2_proofs",
"hex", "hex",
"itertools 0.11.0", "itertools 0.11.0",
"log", "log",
"mock 0.12.0", "mock",
"mpt-zktrie 0.12.0", "mpt-zktrie",
"num",
"poseidon-circuit",
"revm-precompile",
"serde",
"serde_json",
"strum 0.25.0",
"strum_macros 0.25.3",
]
[[package]]
name = "bus-mapping"
version = "0.13.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.1#4009e5593f13ba73f64f556011ee5ef47bc4ebf3"
dependencies = [
"eth-types 0.13.0",
"ethers-core",
"ethers-providers",
"ethers-signers",
"gadgets 0.13.0",
"halo2_proofs",
"hex",
"itertools 0.11.0",
"log",
"mock 0.13.0",
"mpt-zktrie 0.13.0",
"num", "num",
"poseidon-circuit", "poseidon-circuit",
"rand",
"revm-precompile", "revm-precompile",
"serde", "serde",
"serde_json", "serde_json",
@@ -1182,36 +1125,8 @@ dependencies = [
[[package]] [[package]]
name = "eth-types" name = "eth-types"
version = "0.12.0" version = "0.11.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.2#6f7b46a3b1ccf9dc448735e8455e1ac6f9e30643" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
dependencies = [
"base64 0.13.1",
"ethers-core",
"ethers-signers",
"halo2curves",
"hex",
"itertools 0.11.0",
"log",
"num",
"num-bigint",
"poseidon-base",
"regex",
"revm-precompile",
"revm-primitives",
"serde",
"serde_json",
"serde_with",
"sha3 0.10.8",
"strum 0.25.0",
"strum_macros 0.25.3",
"subtle",
"uint",
]
[[package]]
name = "eth-types"
version = "0.13.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.1#4009e5593f13ba73f64f556011ee5ef47bc4ebf3"
dependencies = [ dependencies = [
"base64 0.13.1", "base64 0.13.1",
"ethers-core", "ethers-core",
@@ -1228,6 +1143,7 @@ dependencies = [
"revm-primitives", "revm-primitives",
"serde", "serde",
"serde_json", "serde_json",
"serde_stacker",
"serde_with", "serde_with",
"sha3 0.10.8", "sha3 0.10.8",
"strum 0.25.0", "strum 0.25.0",
@@ -1366,24 +1282,11 @@ dependencies = [
[[package]] [[package]]
name = "external-tracer" name = "external-tracer"
version = "0.12.0" version = "0.11.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.2#6f7b46a3b1ccf9dc448735e8455e1ac6f9e30643" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
dependencies = [ dependencies = [
"eth-types 0.12.0", "eth-types",
"geth-utils 0.12.0", "geth-utils",
"log",
"serde",
"serde_json",
"serde_stacker",
]
[[package]]
name = "external-tracer"
version = "0.13.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.1#4009e5593f13ba73f64f556011ee5ef47bc4ebf3"
dependencies = [
"eth-types 0.13.0",
"geth-utils 0.13.0",
"log", "log",
"serde", "serde",
"serde_json", "serde_json",
@@ -1561,22 +1464,10 @@ dependencies = [
[[package]] [[package]]
name = "gadgets" name = "gadgets"
version = "0.12.0" version = "0.11.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.2#6f7b46a3b1ccf9dc448735e8455e1ac6f9e30643" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
dependencies = [ dependencies = [
"eth-types 0.12.0", "eth-types",
"halo2_proofs",
"poseidon-base",
"sha3 0.10.8",
"strum 0.25.0",
]
[[package]]
name = "gadgets"
version = "0.13.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.1#4009e5593f13ba73f64f556011ee5ef47bc4ebf3"
dependencies = [
"eth-types 0.13.0",
"halo2_proofs", "halo2_proofs",
"poseidon-base", "poseidon-base",
"sha3 0.10.8", "sha3 0.10.8",
@@ -1596,18 +1487,8 @@ dependencies = [
[[package]] [[package]]
name = "geth-utils" name = "geth-utils"
version = "0.12.0" version = "0.11.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.2#6f7b46a3b1ccf9dc448735e8455e1ac6f9e30643" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
dependencies = [
"env_logger 0.10.0",
"gobuild",
"log",
]
[[package]]
name = "geth-utils"
version = "0.13.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.1#4009e5593f13ba73f64f556011ee5ef47bc4ebf3"
dependencies = [ dependencies = [
"env_logger 0.10.0", "env_logger 0.10.0",
"gobuild", "gobuild",
@@ -2355,28 +2236,13 @@ dependencies = [
[[package]] [[package]]
name = "mock" name = "mock"
version = "0.12.0" version = "0.11.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.2#6f7b46a3b1ccf9dc448735e8455e1ac6f9e30643" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
dependencies = [ dependencies = [
"eth-types 0.12.0", "eth-types",
"ethers-core", "ethers-core",
"ethers-signers", "ethers-signers",
"external-tracer 0.12.0", "external-tracer",
"itertools 0.11.0",
"log",
"rand",
"rand_chacha",
]
[[package]]
name = "mock"
version = "0.13.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.1#4009e5593f13ba73f64f556011ee5ef47bc4ebf3"
dependencies = [
"eth-types 0.13.0",
"ethers-core",
"ethers-signers",
"external-tracer 0.13.0",
"itertools 0.11.0", "itertools 0.11.0",
"log", "log",
"rand", "rand",
@@ -2385,30 +2251,16 @@ dependencies = [
[[package]] [[package]]
name = "mpt-zktrie" name = "mpt-zktrie"
version = "0.12.0" version = "0.11.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.2#6f7b46a3b1ccf9dc448735e8455e1ac6f9e30643" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
dependencies = [ dependencies = [
"eth-types 0.12.0", "eth-types",
"halo2curves", "halo2curves",
"hex", "hex",
"log", "log",
"num-bigint", "num-bigint",
"poseidon-base", "poseidon-base",
"zktrie 0.3.0 (git+https://github.com/scroll-tech/zktrie.git?branch=main)", "zktrie",
]
[[package]]
name = "mpt-zktrie"
version = "0.13.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.1#4009e5593f13ba73f64f556011ee5ef47bc4ebf3"
dependencies = [
"eth-types 0.13.0",
"halo2curves",
"hex",
"log",
"num-bigint",
"poseidon-base",
"zktrie 0.3.0 (git+https://github.com/scroll-tech/zktrie.git?branch=v0.9)",
] ]
[[package]] [[package]]
@@ -2871,17 +2723,17 @@ dependencies = [
[[package]] [[package]]
name = "prover" name = "prover"
version = "0.12.0" version = "0.11.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.2#6f7b46a3b1ccf9dc448735e8455e1ac6f9e30643" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
dependencies = [ dependencies = [
"aggregator 0.12.0", "aggregator",
"anyhow", "anyhow",
"base64 0.13.1", "base64 0.13.1",
"blake2", "blake2",
"bus-mapping 0.12.0", "bus-mapping",
"chrono", "chrono",
"dotenvy", "dotenvy",
"eth-types 0.12.0", "eth-types",
"ethers-core", "ethers-core",
"git-version", "git-version",
"halo2_proofs", "halo2_proofs",
@@ -2889,7 +2741,7 @@ dependencies = [
"itertools 0.11.0", "itertools 0.11.0",
"log", "log",
"log4rs", "log4rs",
"mpt-zktrie 0.12.0", "mpt-zktrie",
"num-bigint", "num-bigint",
"rand", "rand",
"rand_xorshift", "rand_xorshift",
@@ -2900,41 +2752,7 @@ dependencies = [
"sha2", "sha2",
"snark-verifier", "snark-verifier",
"snark-verifier-sdk", "snark-verifier-sdk",
"zkevm-circuits 0.12.0", "zkevm-circuits",
]
[[package]]
name = "prover"
version = "0.13.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.1#4009e5593f13ba73f64f556011ee5ef47bc4ebf3"
dependencies = [
"aggregator 0.13.0",
"anyhow",
"base64 0.13.1",
"blake2",
"bus-mapping 0.13.0",
"chrono",
"dotenvy",
"eth-types 0.13.0",
"ethers-core",
"git-version",
"halo2_proofs",
"hex",
"itertools 0.11.0",
"log",
"log4rs",
"mpt-zktrie 0.13.0",
"num-bigint",
"rand",
"rand_xorshift",
"serde",
"serde_derive",
"serde_json",
"serde_stacker",
"sha2",
"snark-verifier",
"snark-verifier-sdk",
"zkevm-circuits 0.13.0",
] ]
[[package]] [[package]]
@@ -3140,7 +2958,7 @@ dependencies = [
[[package]] [[package]]
name = "revm-precompile" name = "revm-precompile"
version = "7.0.0" version = "7.0.0"
source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#36c304d9e9ba4e4b2d5468d91a6bd27210133b6a" source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#8543dd627348907773d8057807b6a310b276bb30"
dependencies = [ dependencies = [
"aurora-engine-modexp", "aurora-engine-modexp",
"c-kzg", "c-kzg",
@@ -3156,7 +2974,7 @@ dependencies = [
[[package]] [[package]]
name = "revm-primitives" name = "revm-primitives"
version = "4.0.0" version = "4.0.0"
source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#36c304d9e9ba4e4b2d5468d91a6bd27210133b6a" source = "git+https://github.com/scroll-tech/revm?branch=scroll-evm-executor/v36#8543dd627348907773d8057807b6a310b276bb30"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"auto_impl", "auto_impl",
@@ -3707,7 +3525,7 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
[[package]] [[package]]
name = "snark-verifier" name = "snark-verifier"
version = "0.1.0" version = "0.1.0"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#58c46b74c73156b9e09dc27617369d2acfb4461b" source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#fe1f8906041ad323034881fbd808908250d44829"
dependencies = [ dependencies = [
"bytes", "bytes",
"ethereum-types", "ethereum-types",
@@ -3730,7 +3548,7 @@ dependencies = [
[[package]] [[package]]
name = "snark-verifier-sdk" name = "snark-verifier-sdk"
version = "0.0.1" version = "0.0.1"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#58c46b74c73156b9e09dc27617369d2acfb4461b" source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#fe1f8906041ad323034881fbd808908250d44829"
dependencies = [ dependencies = [
"bincode", "bincode",
"ethereum-types", "ethereum-types",
@@ -4542,18 +4360,18 @@ dependencies = [
[[package]] [[package]]
name = "zkevm-circuits" name = "zkevm-circuits"
version = "0.12.0" version = "0.11.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.12.2#6f7b46a3b1ccf9dc448735e8455e1ac6f9e30643" source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.11.4#38a68e22d3d8449bd39a50c22da55b9e741de453"
dependencies = [ dependencies = [
"array-init", "array-init",
"bus-mapping 0.12.0", "bus-mapping",
"either", "either",
"env_logger 0.10.0", "env_logger 0.10.0",
"eth-types 0.12.0", "eth-types",
"ethers-core", "ethers-core",
"ethers-signers", "ethers-signers",
"ff", "ff",
"gadgets 0.12.0", "gadgets",
"halo2-base", "halo2-base",
"halo2-ecc", "halo2-ecc",
"halo2-mpt-circuits", "halo2-mpt-circuits",
@@ -4563,50 +4381,8 @@ dependencies = [
"itertools 0.11.0", "itertools 0.11.0",
"log", "log",
"misc-precompiled-circuit", "misc-precompiled-circuit",
"mock 0.12.0", "mock",
"mpt-zktrie 0.12.0", "mpt-zktrie",
"num",
"num-bigint",
"poseidon-circuit",
"rand",
"rand_chacha",
"rand_xorshift",
"rayon",
"serde",
"serde_json",
"sha3 0.10.8",
"snark-verifier",
"snark-verifier-sdk",
"strum 0.25.0",
"strum_macros 0.25.3",
"subtle",
]
[[package]]
name = "zkevm-circuits"
version = "0.13.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.13.1#4009e5593f13ba73f64f556011ee5ef47bc4ebf3"
dependencies = [
"array-init",
"bus-mapping 0.13.0",
"either",
"env_logger 0.10.0",
"eth-types 0.13.0",
"ethers-core",
"ethers-signers",
"ff",
"gadgets 0.13.0",
"halo2-base",
"halo2-ecc",
"halo2-mpt-circuits",
"halo2_gadgets",
"halo2_proofs",
"hex",
"itertools 0.11.0",
"log",
"misc-precompiled-circuit",
"mock 0.13.0",
"mpt-zktrie 0.13.0",
"num", "num",
"num-bigint", "num-bigint",
"poseidon-circuit", "poseidon-circuit",
@@ -4628,15 +4404,13 @@ dependencies = [
name = "zkp" name = "zkp"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"anyhow",
"base64 0.13.1", "base64 0.13.1",
"env_logger 0.9.3", "env_logger 0.9.3",
"halo2_proofs", "halo2_proofs",
"libc", "libc",
"log", "log",
"once_cell", "once_cell",
"prover 0.12.0", "prover",
"prover 0.13.0",
"serde", "serde",
"serde_derive", "serde_derive",
"serde_json", "serde_json",
@@ -4649,16 +4423,7 @@ version = "0.3.0"
source = "git+https://github.com/scroll-tech/zktrie.git?branch=main#23181f209e94137f74337b150179aeb80c72e7c8" source = "git+https://github.com/scroll-tech/zktrie.git?branch=main#23181f209e94137f74337b150179aeb80c72e7c8"
dependencies = [ dependencies = [
"gobuild", "gobuild",
"zktrie_rust 0.3.0 (git+https://github.com/scroll-tech/zktrie.git?branch=main)", "zktrie_rust",
]
[[package]]
name = "zktrie"
version = "0.3.0"
source = "git+https://github.com/scroll-tech/zktrie.git?branch=v0.9#460b8c22af65b7809164548cba1e0253b6db5a70"
dependencies = [
"gobuild",
"zktrie_rust 0.3.0 (git+https://github.com/scroll-tech/zktrie.git?branch=v0.9)",
] ]
[[package]] [[package]]
@@ -4675,20 +4440,6 @@ dependencies = [
"strum_macros 0.24.3", "strum_macros 0.24.3",
] ]
[[package]]
name = "zktrie_rust"
version = "0.3.0"
source = "git+https://github.com/scroll-tech/zktrie.git?branch=v0.9#460b8c22af65b7809164548cba1e0253b6db5a70"
dependencies = [
"hex",
"lazy_static",
"num",
"num-derive",
"num-traits",
"strum 0.24.1",
"strum_macros 0.24.3",
]
[[package]] [[package]]
name = "zstd" name = "zstd"
version = "0.13.0" version = "0.13.0"

View File

@@ -13,6 +13,8 @@ halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" } ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" } ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" } ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
#ethers-etherscan = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
#ethers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"] [patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" } halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"] [patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
@@ -23,11 +25,7 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i
[dependencies] [dependencies]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" } halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] } snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.4", default-features = false, features = ["parallel_syn", "scroll"] }
# darwin
prover_v4 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.12.2", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
# darwin_v2
prover_v5 = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.13.1", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
base64 = "0.13.0" base64 = "0.13.0"
env_logger = "0.9.0" env_logger = "0.9.0"
@@ -37,7 +35,6 @@ once_cell = "1.19"
serde = "1.0" serde = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
serde_json = "1.0.66" serde_json = "1.0.66"
anyhow = "1.0.86"
[profile.test] [profile.test]
opt-level = 3 opt-level = 3

View File

@@ -0,0 +1,198 @@
use crate::{
types::{CheckChunkProofsResponse, ProofResult},
utils::{
c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char,
OUTPUT_DIR,
},
};
use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
check_chunk_hashes,
consts::AGG_VK_FILENAME,
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, BatchProvingTask, BlockTrace, ChunkInfo, ChunkProof,
};
use snark_verifier_sdk::verify_evm_calldata;
use std::{cell::OnceCell, env, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_batch_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
    // TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
    // The VK file must exist here; inside the prover it is optional and only logged as a warning.
if !file_exists(assets_dir, &AGG_VK_FILENAME) {
panic!("{} must exist in folder {}", *AGG_VK_FILENAME, assets_dir);
}
let prover = Prover::from_dirs(params_dir, assets_dir);
PROVER.set(prover).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_batch_verify");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
    // TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *const c_char {
let check_result: Result<bool, String> = panic_catch(|| {
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
if chunk_proofs.is_empty() {
return Err("provided chunk proofs are empty.".to_string());
}
let prover_ref = PROVER.get().expect("failed to get reference to PROVER.");
let valid = prover_ref.check_protocol_of_chunks(&chunk_proofs);
Ok(valid)
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match check_result {
Ok(valid) => CheckChunkProofsResponse {
ok: valid,
error: None,
},
Err(err) => CheckChunkProofsResponse {
ok: false,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_batch_proof(
chunk_hashes: *const c_char,
chunk_proofs: *const c_char,
) -> *const c_char {
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
let chunk_hashes = c_char_to_vec(chunk_hashes);
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_hashes = serde_json::from_slice::<Vec<ChunkInfo>>(&chunk_hashes)
.map_err(|e| format!("failed to deserialize chunk hashes: {e:?}"))?;
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
if chunk_hashes.len() != chunk_proofs.len() {
return Err(format!("chunk hashes and chunk proofs lengths mismatch: chunk_hashes.len() = {}, chunk_proofs.len() = {}",
chunk_hashes.len(), chunk_proofs.len()));
}
let chunk_hashes_proofs: Vec<(_,_)> = chunk_hashes
.into_iter()
.zip(chunk_proofs.clone())
.collect();
check_chunk_hashes("", &chunk_hashes_proofs).map_err(|e| format!("failed to check chunk info: {e:?}"))?;
let batch = BatchProvingTask {
chunk_proofs
};
let proof = PROVER
.get_mut()
.expect("failed to get mutable reference to PROVER.")
.gen_agg_evm_proof(batch, None, OUTPUT_DIR.as_deref())
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match proof_result {
Ok(proof_bytes) => ProofResult {
message: Some(proof_bytes),
error: None,
},
Err(err) => ProofResult {
message: None,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
let fork_name_str = c_char_to_str(fork_name);
let fork_id = match fork_name_str {
"bernoulli" => 2,
"curie" => 3,
_ => {
log::warn!("unexpected fork_name {fork_name_str}, treated as curie");
3
}
};
let verified = panic_catch(|| {
if fork_id == 2 {
            // before upgrade #3 (DA compression)
verify_evm_calldata(
include_bytes!("plonk_verifier_0.10.3.bin").to_vec(),
proof.calldata(),
)
} else {
VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
}
});
verified.unwrap_or(false) as c_char
}
// This function is only used for debugging on the Go side.
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn block_traces_to_chunk_info(block_traces: *const c_char) -> *const c_char {
let block_traces = c_char_to_vec(block_traces);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces).unwrap();
let witness_block = chunk_trace_to_witness_block(block_traces).unwrap();
let chunk_info = ChunkInfo::from_witness_block(&witness_block, false);
let chunk_info_bytes = serde_json::to_vec(&chunk_info).unwrap();
vec_to_c_char(chunk_info_bytes)
}

View File

@@ -0,0 +1,108 @@
use crate::{
types::ProofResult,
utils::{
c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char,
OUTPUT_DIR,
},
};
use libc::c_char;
use prover::{
consts::CHUNK_VK_FILENAME,
utils::init_env_and_log,
zkevm::{Prover, Verifier},
BlockTrace, ChunkProof, ChunkProvingTask,
};
use std::{cell::OnceCell, env, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_chunk_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
    // TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
    // The VK file must exist here; inside the prover it is optional and only logged as a warning.
if !file_exists(assets_dir, &CHUNK_VK_FILENAME) {
panic!("{} must exist in folder {}", *CHUNK_VK_FILENAME, assets_dir);
}
let prover = Prover::from_dirs(params_dir, assets_dir);
PROVER.set(prover).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_dir: *const c_char) {
init_env_and_log("ffi_chunk_verify");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
    // TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
let block_traces = c_char_to_vec(block_traces);
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces)
.map_err(|e| format!("failed to deserialize block traces: {e:?}"))?;
let chunk = ChunkProvingTask::from(block_traces);
let proof = PROVER
.get_mut()
.expect("failed to get mutable reference to PROVER.")
.gen_chunk_proof(chunk, None, None, OUTPUT_DIR.as_deref())
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
})
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
let r = match proof_result {
Ok(proof_bytes) => ProofResult {
message: Some(proof_bytes),
error: None,
},
Err(err) => ProofResult {
message: None,
error: Some(err),
},
};
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(proof: *const c_char) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_chunk_proof(proof));
verified.unwrap_or(false) as c_char
}

View File

@@ -1,63 +1,4 @@
mod batch;
mod chunk;
mod types;
mod utils; mod utils;
mod verifier;
use crate::utils::{c_char_to_str, c_char_to_vec};
use libc::c_char;
use prover_v5::utils::init_env_and_log;
use verifier::{TaskType, VerifierConfig};
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init(config: *const c_char) {
init_env_and_log("ffi_init");
let config_str = c_char_to_str(config);
let verifier_config = serde_json::from_str::<VerifierConfig>(config_str).unwrap();
verifier::init(verifier_config);
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_chunk_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Chunk)
}
fn verify_proof(proof: *const c_char, fork_name: *const c_char, task_type: TaskType) -> c_char {
let proof = c_char_to_vec(proof);
let fork_name_str = c_char_to_str(fork_name);
let verifier = verifier::get_verifier(fork_name_str);
if let Err(e) = verifier {
log::warn!("failed to get verifier, error: {:#}", e);
return 0 as c_char;
}
match verifier.unwrap().verify(task_type, proof) {
Err(e) => {
log::error!("{:?} verify failed, error: {:#}", task_type, e);
false as c_char
}
Ok(result) => result as c_char,
}
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Batch)
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_bundle_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
verify_proof(proof, fork_name, TaskType::Bundle)
}

Binary file not shown.

View File

@@ -0,0 +1,22 @@
use serde::{Deserialize, Serialize};
// Represents the result of a chunk proof checking operation.
// `ok` indicates whether the proof checking was successful.
// `error` provides additional details in case the check failed.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CheckChunkProofsResponse {
pub ok: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
}
// Encapsulates the result from generating a proof.
// `message` holds the generated proof in byte slice format.
// `error` provides additional details in case the proof generation failed.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ProofResult {
#[serde(skip_serializing_if = "Option::is_none")]
pub message: Option<Vec<u8>>,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
}
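
These two structs define the JSON contract between the Rust FFI and its Go caller: every exported function serializes one of them with serde_json before returning. A minimal Go-side sketch of decoding the CheckChunkProofsResponse payload is shown below; the Go package, struct name, and field mapping are illustrative assumptions rather than the repository's actual bindings.

package main

import (
	"encoding/json"
	"fmt"
)

// checkChunkProofsResponse mirrors the Rust CheckChunkProofsResponse above (illustrative only).
type checkChunkProofsResponse struct {
	OK    bool    `json:"ok"`
	Error *string `json:"error,omitempty"`
}

func main() {
	// Example payload as check_chunk_proofs would return it on failure.
	payload := []byte(`{"ok":false,"error":"provided chunk proofs are empty."}`)

	var resp checkChunkProofsResponse
	if err := json.Unmarshal(payload, &resp); err != nil {
		panic(err)
	}
	fmt.Printf("ok=%v error=%q\n", resp.OK, *resp.Error)
}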

View File

@@ -1,9 +1,29 @@
use once_cell::sync::Lazy;
use std::{ use std::{
ffi::CStr, env,
ffi::{CStr, CString},
os::raw::c_char, os::raw::c_char,
panic::{catch_unwind, AssertUnwindSafe}, panic::{catch_unwind, AssertUnwindSafe},
path::PathBuf,
}; };
// Only used for debugging.
pub(crate) static OUTPUT_DIR: Lazy<Option<String>> =
Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
/// # Safety
#[no_mangle]
pub extern "C" fn free_c_chars(ptr: *mut c_char) {
if ptr.is_null() {
log::warn!("Try to free an empty pointer!");
return;
}
unsafe {
let _ = CString::from_raw(ptr);
}
}
pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str { pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
let cstr = unsafe { CStr::from_ptr(c) }; let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_str().unwrap() cstr.to_str().unwrap()
@@ -14,6 +34,21 @@ pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
cstr.to_bytes().to_vec() cstr.to_bytes().to_vec()
} }
pub(crate) fn string_to_c_char(string: String) -> *const c_char {
CString::new(string).unwrap().into_raw()
}
pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *const c_char {
CString::new(bytes).unwrap().into_raw()
}
pub(crate) fn file_exists(dir: &str, filename: &str) -> bool {
let mut path = PathBuf::from(dir);
path.push(filename);
path.exists()
}
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> { pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
catch_unwind(AssertUnwindSafe(f)).map_err(|err| { catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
if let Some(s) = err.downcast_ref::<String>() { if let Some(s) = err.downcast_ref::<String>() {

View File

@@ -1,110 +0,0 @@
mod darwin;
mod darwin_v2;
use anyhow::{bail, Result};
use darwin::DarwinVerifier;
use darwin_v2::DarwinV2Verifier;
use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};
use prover_v4::utils::load_params;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, collections::BTreeMap, rc::Rc};
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TaskType {
Chunk,
Batch,
Bundle,
}
pub trait ProofVerifier {
fn verify(&self, task_type: TaskType, proof: Vec<u8>) -> Result<bool>;
}
#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
pub fork_name: String,
pub params_path: String,
pub assets_path: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct VerifierConfig {
pub low_version_circuit: CircuitConfig,
pub high_version_circuit: CircuitConfig,
}
type HardForkName = String;
struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();
static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
static mut PARAMS_MAP: OnceCell<BTreeMap<u32, ParamsKZG<Bn256>>> = OnceCell::new();
pub fn init(config: VerifierConfig) {
let low_conf = config.low_version_circuit;
std::env::set_var("SCROLL_PROVER_ASSETS_DIR", &low_conf.assets_path);
let params_degrees = [
*prover_v4::config::LAYER2_DEGREE,
*prover_v4::config::LAYER4_DEGREE,
];
// params should be shared between low and high
let mut params_map = BTreeMap::new();
for degree in params_degrees {
if let std::collections::btree_map::Entry::Vacant(e) = params_map.entry(degree) {
match load_params(&low_conf.params_path, degree, None) {
Ok(params) => {
e.insert(params);
}
Err(e) => panic!(
"failed to load params, degree {}, dir {}, err {}",
degree, low_conf.params_path, e
),
}
}
}
unsafe {
PARAMS_MAP.set(params_map).unwrap_unchecked();
}
let verifier = DarwinVerifier::new(unsafe { PARAMS_MAP.get().unwrap() }, &low_conf.assets_path);
unsafe {
VERIFIER_LOW
.set(VerifierPair(
low_conf.fork_name,
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
let high_conf = config.high_version_circuit;
let verifier =
DarwinV2Verifier::new(unsafe { PARAMS_MAP.get().unwrap() }, &high_conf.assets_path);
unsafe {
VERIFIER_HIGH
.set(VerifierPair(
high_conf.fork_name,
Rc::new(Box::new(verifier)),
))
.unwrap_unchecked();
}
}
pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
unsafe {
if let Some(verifier) = VERIFIER_LOW.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
if let Some(verifier) = VERIFIER_HIGH.get() {
if verifier.0 == fork_name {
return Ok(verifier.1.clone());
}
}
}
bail!("failed to get verifier, key not found, {}", fork_name)
}

View File

@@ -1,48 +0,0 @@
use super::{ProofVerifier, TaskType};
use anyhow::Result;
use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};
use crate::utils::panic_catch;
use prover_v4::{
aggregator::Verifier as AggVerifier, zkevm::Verifier, BatchProof, BundleProof, ChunkProof,
};
use std::{collections::BTreeMap, env};
pub struct DarwinVerifier<'params> {
verifier: Verifier<'params>,
agg_verifier: AggVerifier<'params>,
}
impl<'params> DarwinVerifier<'params> {
pub fn new(params_map: &'params BTreeMap<u32, ParamsKZG<Bn256>>, assets_dir: &str) -> Self {
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_params_and_assets(params_map, assets_dir);
let agg_verifier = AggVerifier::from_params_and_assets(params_map, assets_dir);
Self {
verifier,
agg_verifier,
}
}
}
impl<'params> ProofVerifier for DarwinVerifier<'params> {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
let result = panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.verifier.verify_chunk_proof(proof)
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.agg_verifier.verify_batch_proof(&proof)
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.agg_verifier.verify_bundle_proof(proof)
}
});
result.map_err(|e| anyhow::anyhow!(e))
}
}

View File

@@ -1,48 +0,0 @@
use super::{ProofVerifier, TaskType};
use anyhow::Result;
use halo2_proofs::{halo2curves::bn256::Bn256, poly::kzg::commitment::ParamsKZG};
use crate::utils::panic_catch;
use prover_v5::{
aggregator::Verifier as AggVerifier, zkevm::Verifier, BatchProof, BundleProof, ChunkProof,
};
use std::{collections::BTreeMap, env};
pub struct DarwinV2Verifier<'params> {
verifier: Verifier<'params>,
agg_verifier: AggVerifier<'params>,
}
impl<'params> DarwinV2Verifier<'params> {
pub fn new(params_map: &'params BTreeMap<u32, ParamsKZG<Bn256>>, assets_dir: &str) -> Self {
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_params_and_assets(params_map, assets_dir);
let agg_verifier = AggVerifier::from_params_and_assets(params_map, assets_dir);
Self {
verifier,
agg_verifier,
}
}
}
impl<'params> ProofVerifier for DarwinV2Verifier<'params> {
fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
let result = panic_catch(|| match task_type {
TaskType::Chunk => {
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
self.verifier.verify_chunk_proof(proof)
}
TaskType::Batch => {
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
self.agg_verifier.verify_batch_proof(&proof)
}
TaskType::Bundle => {
let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
self.agg_verifier.verify_bundle_proof(proof)
}
});
result.map_err(|e| anyhow::anyhow!(e))
}
}

View File

@@ -1,10 +1,15 @@
// BatchVerifier is used to: void init_batch_prover(char* params_dir, char* assets_dir);
// - Verify a batch proof void init_batch_verifier(char* params_dir, char* assets_dir);
// - Verify a bundle proof char* get_batch_vk();
void init(char* config); char* check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof, char* fork_name); char verify_batch_proof(char* proof, char* fork_name);
char verify_bundle_proof(char* proof, char* fork_name); void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
char* get_chunk_vk();
char* gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);
char verify_chunk_proof(char* proof, char* fork_name); char* block_traces_to_chunk_info(char* block_traces);
void free_c_chars(char* ptr);
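
The declarations above are the full C surface the Go side links against. A hedged cgo sketch of driving the chunk prover through it is given below; the cgo flags, package name, and helper shape are assumptions for illustration only, not the repository's actual binding code.

package libzkp

/*
#cgo LDFLAGS: -lzkp
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"

import "unsafe"

// GenChunkProof hands JSON-encoded block traces to the FFI and returns the
// serialized ProofResult JSON. The C string returned by the library is
// released with free_c_chars, and the input string with the libc free.
func GenChunkProof(tracesJSON []byte) string {
	in := C.CString(string(tracesJSON))
	defer C.free(unsafe.Pointer(in))

	out := C.gen_chunk_proof(in)
	if out == nil {
		return ""
	}
	defer C.free_c_chars(out)

	return C.GoString(out)
}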

View File

@@ -2,7 +2,6 @@ package testcontainers
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"log" "log"
"os" "os"
@@ -21,10 +20,9 @@ import (
// TestcontainerApps testcontainers struct // TestcontainerApps testcontainers struct
type TestcontainerApps struct { type TestcontainerApps struct {
postgresContainer *postgres.PostgresContainer postgresContainer *postgres.PostgresContainer
l2GethContainer *testcontainers.DockerContainer l2GethContainer *testcontainers.DockerContainer
poSL1Container compose.ComposeStack poSL1Container compose.ComposeStack
web3SignerContainer *testcontainers.DockerContainer
// common time stamp in nanoseconds. // common time stamp in nanoseconds.
Timestamp int Timestamp int
@@ -113,51 +111,10 @@ func (t *TestcontainerApps) StartPoSL1Container() error {
return nil return nil
} }
func (t *TestcontainerApps) StartWeb3SignerContainer(chainId int) error {
if t.web3SignerContainer != nil && t.web3SignerContainer.IsRunning() {
return nil
}
var (
err error
rootDir string
)
if rootDir, err = findProjectRootDir(); err != nil {
return fmt.Errorf("failed to find project root directory: %v", err)
}
	// web3signerconf/keyconf.yaml may contain multiple configured keys; web3signer then chooses the one corresponding to the tx's from field
web3SignerConfDir := filepath.Join(rootDir, "common", "testcontainers", "web3signerconf")
req := testcontainers.ContainerRequest{
Image: "consensys/web3signer:develop",
ExposedPorts: []string{"9000/tcp"},
Cmd: []string{"--key-config-path", "/web3signerconf/", "eth1", "--chain-id", fmt.Sprintf("%d", chainId)},
Files: []testcontainers.ContainerFile{
{
HostFilePath: web3SignerConfDir,
ContainerFilePath: "/",
FileMode: 0o777,
},
},
WaitingFor: wait.ForLog("ready to handle signing requests"),
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
}
container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
if err != nil {
log.Printf("failed to start web3signer container: %s", err)
return err
}
t.web3SignerContainer, _ = container.(*testcontainers.DockerContainer)
return nil
}
// GetPoSL1EndPoint returns the endpoint of the running PoS L1 container // GetPoSL1EndPoint returns the endpoint of the running PoS L1 container
func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) { func (t *TestcontainerApps) GetPoSL1EndPoint() (string, error) {
if t.poSL1Container == nil { if t.poSL1Container == nil {
return "", errors.New("PoS L1 container is not running") return "", fmt.Errorf("PoS L1 container is not running")
} }
contrainer, err := t.poSL1Container.ServiceContainer(context.Background(), "geth") contrainer, err := t.poSL1Container.ServiceContainer(context.Background(), "geth")
if err != nil { if err != nil {
@@ -178,7 +135,7 @@ func (t *TestcontainerApps) GetPoSL1Client() (*ethclient.Client, error) {
// GetDBEndPoint returns the endpoint of the running postgres container // GetDBEndPoint returns the endpoint of the running postgres container
func (t *TestcontainerApps) GetDBEndPoint() (string, error) { func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
if t.postgresContainer == nil || !t.postgresContainer.IsRunning() { if t.postgresContainer == nil || !t.postgresContainer.IsRunning() {
return "", errors.New("postgres is not running") return "", fmt.Errorf("postgres is not running")
} }
return t.postgresContainer.ConnectionString(context.Background(), "sslmode=disable") return t.postgresContainer.ConnectionString(context.Background(), "sslmode=disable")
} }
@@ -186,7 +143,7 @@ func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
// GetL2GethEndPoint returns the endpoint of the running L2Geth container // GetL2GethEndPoint returns the endpoint of the running L2Geth container
func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) { func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
if t.l2GethContainer == nil || !t.l2GethContainer.IsRunning() { if t.l2GethContainer == nil || !t.l2GethContainer.IsRunning() {
return "", errors.New("l2 geth is not running") return "", fmt.Errorf("l2 geth is not running")
} }
endpoint, err := t.l2GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws") endpoint, err := t.l2GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
if err != nil { if err != nil {
@@ -195,14 +152,6 @@ func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
return endpoint, nil return endpoint, nil
} }
// GetWeb3SignerEndpoint returns the endpoint of the running web3signer container
func (t *TestcontainerApps) GetWeb3SignerEndpoint() (string, error) {
if t.web3SignerContainer == nil || !t.web3SignerContainer.IsRunning() {
return "", errors.New("web3signer is not running")
}
return t.web3SignerContainer.PortEndpoint(context.Background(), "9000/tcp", "http")
}
// GetGormDBClient returns a gorm.DB by connecting to the running postgres container // GetGormDBClient returns a gorm.DB by connecting to the running postgres container
func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) { func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
endpoint, err := t.GetDBEndPoint() endpoint, err := t.GetDBEndPoint()
@@ -251,11 +200,6 @@ func (t *TestcontainerApps) Free() {
t.poSL1Container = nil t.poSL1Container = nil
} }
} }
if t.web3SignerContainer != nil && t.web3SignerContainer.IsRunning() {
if err := t.web3SignerContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop web3signer container: %s", err)
}
}
} }
// findProjectRootDir find project root directory // findProjectRootDir find project root directory
@@ -273,7 +217,7 @@ func findProjectRootDir() (string, error) {
parentDir := filepath.Dir(currentDir) parentDir := filepath.Dir(currentDir)
if parentDir == currentDir { if parentDir == currentDir {
return "", errors.New("go.work file not found in any parent directory") return "", fmt.Errorf("go.work file not found in any parent directory")
} }
currentDir = parentDir currentDir = parentDir

View File

@@ -44,11 +44,6 @@ func TestNewTestcontainerApps(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
assert.NotNil(t, ethclient) assert.NotNil(t, ethclient)
assert.NoError(t, testApps.StartWeb3SignerContainer(1))
endpoint, err = testApps.GetWeb3SignerEndpoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
// test free testcontainers // test free testcontainers
testApps.Free() testApps.Free()
endpoint, err = testApps.GetDBEndPoint() endpoint, err = testApps.GetDBEndPoint()
@@ -62,8 +57,4 @@ func TestNewTestcontainerApps(t *testing.T) {
endpoint, err = testApps.GetPoSL1EndPoint() endpoint, err = testApps.GetPoSL1EndPoint()
assert.EqualError(t, err, "PoS L1 container is not running") assert.EqualError(t, err, "PoS L1 container is not running")
assert.Empty(t, endpoint) assert.Empty(t, endpoint)
endpoint, err = testApps.GetWeb3SignerEndpoint()
assert.EqualError(t, err, "web3signer is not running")
assert.Empty(t, endpoint)
} }

View File

@@ -1,7 +0,0 @@
type: "file-raw"
keyType: "SECP256K1"
privateKey: "0x1313131313131313131313131313131313131313131313131313131313131313"
---
type: "file-raw"
keyType: "SECP256K1"
privateKey: "0x1212121212121212121212121212121212121212121212121212121212121212"

View File

@@ -109,10 +109,6 @@ const (
ProverTaskFailureTypeVerifiedFailed ProverTaskFailureTypeVerifiedFailed
	// ProverTaskFailureTypeServerError a server-side error occurred // ProverTaskFailureTypeServerError a server-side error occurred
ProverTaskFailureTypeServerError ProverTaskFailureTypeServerError
	// ProverTaskFailureTypeObjectAlreadyVerified the object (batch/chunk) is already verified; may exist in a test env when ENABLE_TEST_ENV_BYPASS_FEATURES is true
ProverTaskFailureTypeObjectAlreadyVerified
	// ProverTaskFailureTypeReassignedByAdmin reassigned by admin; this value is used in the admin system and defined here for clarity
ProverTaskFailureTypeReassignedByAdmin
) )
func (r ProverTaskFailureType) String() string { func (r ProverTaskFailureType) String() string {
@@ -127,10 +123,6 @@ func (r ProverTaskFailureType) String() string {
return "prover task failure verified failed" return "prover task failure verified failed"
case ProverTaskFailureTypeServerError: case ProverTaskFailureTypeServerError:
return "prover task failure server exception" return "prover task failure server exception"
case ProverTaskFailureTypeObjectAlreadyVerified:
return "prover task failure object already verified"
case ProverTaskFailureTypeReassignedByAdmin:
return "prover task failure reassigned by admin"
default: default:
return fmt.Sprintf("illegal prover task failure type (%d)", int32(r)) return fmt.Sprintf("illegal prover task failure type (%d)", int32(r))
} }
@@ -196,31 +188,6 @@ func (s ChunkProofsStatus) String() string {
} }
} }
// BatchProofsStatus describes the proving status of batches that belong to a bundle.
type BatchProofsStatus int
const (
// BatchProofsStatusUndefined represents an undefined batch proofs status
BatchProofsStatusUndefined BatchProofsStatus = iota
// BatchProofsStatusPending means that some batches that belong to this bundle have not been proven
BatchProofsStatusPending
// BatchProofsStatusReady means that all batches that belong to this bundle have been proven
BatchProofsStatusReady
)
func (s BatchProofsStatus) String() string {
switch s {
case BatchProofsStatusPending:
return "BatchProofsStatusPending"
case BatchProofsStatusReady:
return "BatchProofsStatusReady"
default:
return fmt.Sprintf("Undefined BatchProofsStatus (%d)", int32(s))
}
}
// RollupStatus block_batch rollup_status (pending, committing, committed, commit_failed, finalizing, finalized, finalize_skipped, finalize_failed) // RollupStatus block_batch rollup_status (pending, committing, committed, commit_failed, finalizing, finalized, finalize_skipped, finalize_failed)
type RollupStatus int type RollupStatus int

View File

@@ -0,0 +1,91 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
// HardForkName the hard fork name
HardForkName string `json:"hard_fork_name"`
}
// SignWithKey signs the auth message with the given private key.
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the auth message's signature.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey returns the public key recovered from the signature.
func (a *AuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
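
Since AuthMsg adds the HardForkName field on top of the legacy identity, a hedged end-to-end sketch of the registration flow is shown below; the prover name, version, challenge, and fork name are placeholder values, and the flow mirrors the message tests further down.

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"

	"scroll-tech/common/types/message"
)

func main() {
	priv, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}

	authMsg := &message.AuthMsg{
		Identity: &message.Identity{
			ProverName:    "example-prover",
			ProverVersion: "v4.4.0",
			Challenge:     "example-challenge",
			HardForkName:  "curie",
		},
	}
	if err := authMsg.SignWithKey(priv); err != nil {
		panic(err)
	}

	ok, err := authMsg.Verify()
	fmt.Println(ok, err) // expected: true <nil>
}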

View File

@@ -0,0 +1,89 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type LegacyAuthMsg struct {
// Message fields
Identity *LegacyIdentity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// LegacyIdentity contains all the fields to be signed by the prover.
type LegacyIdentity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
// SignWithKey signs the auth message with the given private key.
func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the auth message's signature.
func (a *LegacyAuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey returns the public key recovered from the signature.
func (a *LegacyAuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *LegacyIdentity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}

View File

@@ -1,10 +1,28 @@
package message package message
import ( import (
"crypto/ecdsa"
"crypto/rand"
"encoding/hex"
"errors" "errors"
"fmt" "fmt"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// ProofFailureType the proof failure type
type ProofFailureType int
const (
// ProofFailureUndefined the undefined type proof failure type
ProofFailureUndefined ProofFailureType = iota
// ProofFailurePanic proof failure for prover panic
ProofFailurePanic
// ProofFailureNoPanic proof failure for no prover panic
ProofFailureNoPanic
) )
// RespStatus represents status code from prover to scroll // RespStatus represents status code from prover to scroll
@@ -17,7 +35,7 @@ const (
StatusProofError StatusProofError
) )
// ProofType represents the type of task. // ProofType represents the type of prover.
type ProofType uint8 type ProofType uint8
func (r ProofType) String() string { func (r ProofType) String() string {
@@ -26,8 +44,6 @@ func (r ProofType) String() string {
return "proof type chunk" return "proof type chunk"
case ProofTypeBatch: case ProofTypeBatch:
return "proof type batch" return "proof type batch"
case ProofTypeBundle:
return "proof type bundle"
default: default:
return fmt.Sprintf("illegal proof type: %d", r) return fmt.Sprintf("illegal proof type: %d", r)
} }
@@ -36,14 +52,93 @@ func (r ProofType) String() string {
const ( const (
// ProofTypeUndefined is an unknown proof type // ProofTypeUndefined is an unknown proof type
ProofTypeUndefined ProofType = iota ProofTypeUndefined ProofType = iota
// ProofTypeChunk generates a proof for a ZkEvm chunk, where the inputs are the execution traces for blocks contained in the chunk. ProofTypeChunk is the default proof type. // ProofTypeChunk is default prover, it only generates zk proof from traces.
ProofTypeChunk ProofTypeChunk
// ProofTypeBatch generates zk proof from chunk proofs // ProofTypeBatch generates zk proof from other zk proofs and aggregate them into one proof.
ProofTypeBatch ProofTypeBatch
// ProofTypeBundle generates zk proof from batch proofs
ProofTypeBundle
) )
// GenerateToken generates token
func GenerateToken() (string, error) {
b := make([]byte, 16)
if _, err := rand.Read(b); err != nil {
return "", err
}
return hex.EncodeToString(b), nil
}
// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
*ProofDetail `json:"zkProof"`
// Prover signature
Signature string `json:"signature"`
// Prover public key
publicKey string
}
// Sign signs the ProofMsg.
func (a *ProofMsg) Sign(priv *ecdsa.PrivateKey) error {
hash, err := a.ProofDetail.Hash()
if err != nil {
return err
}
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies ProofMsg.Signature.
func (a *ProofMsg) Verify() (bool, error) {
hash, err := a.ProofDetail.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
// recover public key
if a.publicKey == "" {
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
}
return crypto.VerifySignature(common.FromHex(a.publicKey), hash, sig[:len(sig)-1]), nil
}
// PublicKey returns the public key recovered from the signature.
func (a *ProofMsg) PublicKey() (string, error) {
if a.publicKey == "" {
hash, err := a.ProofDetail.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
a.publicKey = common.Bytes2Hex(crypto.CompressPubkey(pk))
return a.publicKey, nil
}
return a.publicKey, nil
}
// TaskMsg is a wrapper type around db ProveTask type.
type TaskMsg struct {
UUID string `json:"uuid"`
ID string `json:"id"`
Type ProofType `json:"type,omitempty"`
BatchTaskDetail *BatchTaskDetail `json:"batch_task_detail,omitempty"`
ChunkTaskDetail *ChunkTaskDetail `json:"chunk_task_detail,omitempty"`
}
// ChunkTaskDetail is a type containing ChunkTask detail. // ChunkTaskDetail is a type containing ChunkTask detail.
type ChunkTaskDetail struct { type ChunkTaskDetail struct {
BlockHashes []common.Hash `json:"block_hashes"` BlockHashes []common.Hash `json:"block_hashes"`
@@ -53,13 +148,28 @@ type ChunkTaskDetail struct {
type BatchTaskDetail struct { type BatchTaskDetail struct {
ChunkInfos []*ChunkInfo `json:"chunk_infos"` ChunkInfos []*ChunkInfo `json:"chunk_infos"`
ChunkProofs []*ChunkProof `json:"chunk_proofs"` ChunkProofs []*ChunkProof `json:"chunk_proofs"`
BatchHeader interface{} `json:"batch_header"`
BlobBytes []byte `json:"blob_bytes"`
} }
// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches. // ProofDetail is the message received from provers that contains zk proof, the status of
type BundleTaskDetail struct { // the proof generation succeeded, and an error message if proof generation failed.
BatchProofs []*BatchProof `json:"batch_proofs"` type ProofDetail struct {
ID string `json:"id"`
Type ProofType `json:"type,omitempty"`
Status RespStatus `json:"status"`
ChunkProof *ChunkProof `json:"chunk_proof,omitempty"`
BatchProof *BatchProof `json:"batch_proof,omitempty"`
Error string `json:"error,omitempty"`
}
// Hash returns the content hash of the proof detail.
func (z *ProofDetail) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(z)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
} }
// ChunkInfo is for calculating pi_hash for chunk // ChunkInfo is for calculating pi_hash for chunk
@@ -94,16 +204,15 @@ type ChunkProof struct {
// BatchProof includes the proof info that are required for batch verification and rollup. // BatchProof includes the proof info that are required for batch verification and rollup.
type BatchProof struct { type BatchProof struct {
Protocol []byte `json:"protocol"`
Proof []byte `json:"proof"` Proof []byte `json:"proof"`
Instances []byte `json:"instances"` Instances []byte `json:"instances"`
Vk []byte `json:"vk"` Vk []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation // cross-reference between coordinator computation and prover computation
BatchHash common.Hash `json:"batch_hash"` GitVersion string `json:"git_version,omitempty"`
GitVersion string `json:"git_version,omitempty"`
} }
// SanityCheck checks whether a BatchProof is in a legal format // SanityCheck checks whether a BatchProof is in a legal format
// TODO: change to check Proof&Instance when upgrading to snark verifier v0.4
func (ap *BatchProof) SanityCheck() error { func (ap *BatchProof) SanityCheck() error {
if ap == nil { if ap == nil {
return errors.New("agg_proof is nil") return errors.New("agg_proof is nil")
@@ -112,51 +221,8 @@ func (ap *BatchProof) SanityCheck() error {
if len(ap.Proof) == 0 { if len(ap.Proof) == 0 {
return errors.New("proof not ready") return errors.New("proof not ready")
} }
if len(ap.Proof)%32 != 0 { if len(ap.Proof)%32 != 0 {
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof)) return fmt.Errorf("proof buffer has wrong length, expected: 32, got: %d", len(ap.Proof))
}
if len(ap.Instances) == 0 {
return errors.New("instance not ready")
}
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
}
return nil
}
// BundleProof includes the proof info that are required for verification of a bundle of batch proofs.
type BundleProof struct {
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
	// cross-reference between coordinator computation and prover computation
GitVersion string `json:"git_version,omitempty"`
}
// SanityCheck checks whether a BundleProof is in a legal format
func (ap *BundleProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}
if len(ap.Proof) == 0 {
return errors.New("proof not ready")
}
if len(ap.Proof)%32 != 0 {
return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof))
}
if len(ap.Instances) == 0 {
return errors.New("instance not ready")
}
if len(ap.Vk) == 0 {
return errors.New("vk not ready")
} }
return nil return nil

View File

@@ -0,0 +1,158 @@
package message
import (
"encoding/hex"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
)
func TestAuthMessageSignAndVerify(t *testing.T) {
privkey, err := crypto.GenerateKey()
assert.NoError(t, err)
authMsg := &AuthMsg{
Identity: &Identity{
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzgxNzUsIm9yaWdfaWF0IjoxNjkxMDM0NTc1fQ.HybBMsEJFhyZqtIa2iVcHUP7CEFttf708jmTMAImAWA",
ProverName: "test",
ProverVersion: "v1.0.0",
},
}
assert.NoError(t, authMsg.SignWithKey(privkey))
// Check public key.
pk, err := authMsg.PublicKey()
assert.NoError(t, err)
assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)
ok, err := authMsg.Verify()
assert.NoError(t, err)
assert.Equal(t, true, ok)
// Check public key is ok.
pub, err := authMsg.PublicKey()
assert.NoError(t, err)
pubkey := crypto.CompressPubkey(&privkey.PublicKey)
assert.Equal(t, pub, common.Bytes2Hex(pubkey))
}
func TestGenerateToken(t *testing.T) {
token, err := GenerateToken()
assert.NoError(t, err)
assert.Equal(t, 32, len(token))
}
func TestIdentityHash(t *testing.T) {
identity := &Identity{
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzM0MTksIm9yaWdfaWF0IjoxNjkxMDI5ODE5fQ.EhkLZsj__rNPVC3ZDYBtvdh0nB8mmM_Hl82hObaIWOs",
ProverName: "test",
ProverVersion: "v1.0.0",
}
hash, err := identity.Hash()
assert.NoError(t, err)
expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
func TestProofMessageSignVerifyPublicKey(t *testing.T) {
privkey, err := crypto.GenerateKey()
assert.NoError(t, err)
proofMsg := &ProofMsg{
ProofDetail: &ProofDetail{
ID: "testID",
Type: ProofTypeChunk,
Status: StatusOk,
ChunkProof: &ChunkProof{
StorageTrace: []byte("testStorageTrace"),
Protocol: []byte("testProtocol"),
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
},
}
assert.NoError(t, proofMsg.Sign(privkey))
// Test when publicKey is not set.
ok, err := proofMsg.Verify()
assert.NoError(t, err)
assert.Equal(t, true, ok)
// Test when publicKey is already set.
ok, err = proofMsg.Verify()
assert.NoError(t, err)
assert.Equal(t, true, ok)
}
func TestProofDetailHash(t *testing.T) {
proofDetail := &ProofDetail{
ID: "testID",
Type: ProofTypeChunk,
Status: StatusOk,
ChunkProof: &ChunkProof{
StorageTrace: []byte("testStorageTrace"),
Protocol: []byte("testProtocol"),
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
}
hash, err := proofDetail.Hash()
assert.NoError(t, err)
expectedHash := "01128ea9006601146ba80dbda959c96ebaefca463e78570e473a57d821db5ec1"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
func TestProveTypeString(t *testing.T) {
proofTypeChunk := ProofType(1)
assert.Equal(t, "proof type chunk", proofTypeChunk.String())
proofTypeBatch := ProofType(2)
assert.Equal(t, "proof type batch", proofTypeBatch.String())
illegalProof := ProofType(3)
assert.Equal(t, "illegal proof type: 3", illegalProof.String())
}
func TestProofMsgPublicKey(t *testing.T) {
privkey, err := crypto.GenerateKey()
assert.NoError(t, err)
proofMsg := &ProofMsg{
ProofDetail: &ProofDetail{
ID: "testID",
Type: ProofTypeChunk,
Status: StatusOk,
ChunkProof: &ChunkProof{
StorageTrace: []byte("testStorageTrace"),
Protocol: []byte("testProtocol"),
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
},
}
assert.NoError(t, proofMsg.Sign(privkey))
// Test when publicKey is not set.
pk, err := proofMsg.PublicKey()
assert.NoError(t, err)
assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)
// Test when publicKey is already set.
proofMsg.publicKey = common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey))
pk, err = proofMsg.PublicKey()
assert.NoError(t, err)
assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&privkey.PublicKey)), pk)
}

View File

@@ -2,7 +2,6 @@ package utils
import ( import (
"crypto/ecdsa" "crypto/ecdsa"
"errors"
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
@@ -29,7 +28,7 @@ func LoadOrCreateKey(keystorePath string, keystorePassword string) (*ecdsa.Priva
} else if err != nil { } else if err != nil {
return nil, err return nil, err
} else if fi.IsDir() { } else if fi.IsDir() {
return nil, errors.New("keystorePath cannot be a dir") return nil, fmt.Errorf("keystorePath cannot be a dir")
} }
keyjson, err := os.ReadFile(filepath.Clean(keystorePath)) keyjson, err := os.ReadFile(filepath.Clean(keystorePath))

View File

@@ -9,14 +9,10 @@ import (
"math/big" "math/big"
"os" "os"
"path/filepath" "path/filepath"
"reflect"
"strconv"
"strings"
"time" "time"
"github.com/modern-go/reflect2" "github.com/modern-go/reflect2"
"github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core"
"github.com/scroll-tech/go-ethereum/log"
) )
// TryTimes retries the given function several times until it returns true. // TryTimes retries the given function several times until it returns true.
@@ -82,89 +78,3 @@ func ReadGenesis(genesisPath string) (*core.Genesis, error) {
} }
return genesis, file.Close() return genesis, file.Close()
} }
// OverrideConfigWithEnv recursively overrides config values with environment variables
func OverrideConfigWithEnv(cfg interface{}, prefix string) error {
v := reflect.ValueOf(cfg)
if v.Kind() != reflect.Ptr || v.IsNil() {
return nil
}
v = v.Elem()
t := v.Type()
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
fieldValue := v.Field(i)
if !fieldValue.CanSet() {
continue
}
tag := field.Tag.Get("json")
if tag == "" {
tag = strings.ToLower(field.Name)
}
envKey := prefix + "_" + strings.ToUpper(tag)
switch fieldValue.Kind() {
case reflect.Ptr:
if !fieldValue.IsNil() {
err := OverrideConfigWithEnv(fieldValue.Interface(), envKey)
if err != nil {
return err
}
}
case reflect.Struct:
err := OverrideConfigWithEnv(fieldValue.Addr().Interface(), envKey)
if err != nil {
return err
}
default:
if envValue, exists := os.LookupEnv(envKey); exists {
log.Info("Overriding config with env var", "key", envKey)
err := setField(fieldValue, envValue)
if err != nil {
return err
}
}
}
}
return nil
}
// setField sets the value of a field based on the environment variable value
func setField(field reflect.Value, value string) error {
switch field.Kind() {
case reflect.String:
field.SetString(value)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
intValue, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return err
}
field.SetInt(intValue)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
uintValue, err := strconv.ParseUint(value, 10, 64)
if err != nil {
return err
}
field.SetUint(uintValue)
case reflect.Float32, reflect.Float64:
floatValue, err := strconv.ParseFloat(value, 64)
if err != nil {
return err
}
field.SetFloat(floatValue)
case reflect.Bool:
boolValue, err := strconv.ParseBool(value)
if err != nil {
return err
}
field.SetBool(boolValue)
default:
return fmt.Errorf("unsupported type: %v", field.Kind())
}
return nil
}
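
To make the naming scheme concrete, a small hedged example follows; the config struct, prefix, and values are invented for illustration, but the env-key derivation (prefix joined with the upper-cased json tag, recursing through pointers and nested structs) matches the helper above, assuming a branch where it still exists.

package main

import (
	"fmt"
	"os"

	"scroll-tech/common/utils"
)

type dbConfig struct {
	DSN        string `json:"dsn"`
	MaxOpenNum int    `json:"max_open_num"`
}

type appConfig struct {
	DB *dbConfig `json:"db"`
}

func main() {
	// With prefix "SCROLL", the nested fields resolve to
	// SCROLL_DB_DSN and SCROLL_DB_MAX_OPEN_NUM.
	os.Setenv("SCROLL_DB_MAX_OPEN_NUM", "50")

	cfg := &appConfig{DB: &dbConfig{DSN: "postgres://localhost", MaxOpenNum: 10}}
	if err := utils.OverrideConfigWithEnv(cfg, "SCROLL"); err != nil {
		panic(err)
	}
	fmt.Println(cfg.DB.MaxOpenNum) // 50
}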

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug" "runtime/debug"
) )
var tag = "v4.4.66" var tag = "v4.4.26"
var commit = func() string { var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok { if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -26,9 +26,6 @@ coordinator_api: libzkp ## Builds the Coordinator api instance.
coordinator_cron: coordinator_cron:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
coordinator_tool:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_tool ./cmd/tool
coordinator_api_skip_libzkp: coordinator_api_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
@@ -57,4 +54,4 @@ docker:
docker_push: docker_push:
docker push scrolltech/coordinator-api:${IMAGE_VERSION} docker push scrolltech/coordinator-api:${IMAGE_VERSION}
docker push scrolltech/coordinator-cron:${IMAGE_VERSION} docker push scrolltech/coordinator-cron:${IMAGE_VERSION}

View File

@@ -88,25 +88,13 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
} }
// Reset prover manager config for manager test cases. // Reset prover manager config for manager test cases.
cfg.ProverManager = &coordinatorConfig.ProverManager{ cfg.ProverManager = &coordinatorConfig.ProverManager{
ProversPerSession: 1, ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{ Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
MockMode: true,
LowVersionCircuit: &coordinatorConfig.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "darwin",
MinProverVersion: "v4.2.0",
},
HighVersionCircuit: &coordinatorConfig.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "darwinV2",
MinProverVersion: "v4.3.0",
},
},
BatchCollectionTimeSec: 60, BatchCollectionTimeSec: 60,
ChunkCollectionTimeSec: 60, ChunkCollectionTimeSec: 60,
SessionAttempts: 10, SessionAttempts: 10,
MaxVerifierWorkers: 4,
MinProverVersion: "v1.0.0",
} }
endpoint, err := c.testApps.GetDBEndPoint() endpoint, err := c.testApps.GetDBEndPoint()
if err != nil { if err != nil {

View File

@@ -1,101 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
var app *cli.App
func init() {
// Set up coordinator app info.
app = cli.NewApp()
app.Action = action
app.Name = "coordinator-tool"
app.Usage = "The Scroll L2 Coordinator Tool"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
defer func() {
if err = database.CloseDB(db); err != nil {
log.Error("can not close db connection", "error", err)
}
}()
batchOrm := orm.NewBatch(db)
taskID := "fa9a290c8f1a46dc626fa67d626fadfe4803968ce776383996f3ae12504a2591"
batches, err := batchOrm.GetBatchesByBundleHash(ctx.Context, taskID)
if err != nil {
log.Error("failed to get batch proofs for batch", "task_id", taskID, "error", err)
return err
}
if len(batches) == 0 {
log.Error("failed to get batch proofs for bundle, not found batch", "task_id", taskID)
return fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", taskID)
}
var batchProofs []*message.BatchProof
for _, batch := range batches {
var proof message.BatchProof
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
log.Error("failed to unmarshal batch proof")
return fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, taskID, batch.Hash)
}
batchProofs = append(batchProofs, &proof)
}
taskDetail := message.BundleTaskDetail{
BatchProofs: batchProofs,
}
batchProofsBytes, err := json.Marshal(taskDetail)
if err != nil {
log.Error("failed to marshal batch proof")
return fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", taskID, err)
}
taskMsg := &coordinatorType.GetTaskSchema{
TaskID: taskID,
TaskType: int(message.ProofTypeBundle),
TaskData: string(batchProofsBytes),
}
log.Info("task_msg", "data", taskMsg)
return nil
}
func main() {
// RunApp the coordinator.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -2,24 +2,16 @@
"prover_manager": { "prover_manager": {
"provers_per_session": 1, "provers_per_session": 1,
"session_attempts": 5, "session_attempts": 5,
"bundle_collection_time_sec": 180,
"batch_collection_time_sec": 180, "batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180, "chunk_collection_time_sec": 180,
"verifier": { "verifier": {
"fork_name": "bernoulli",
"mock_mode": true, "mock_mode": true,
"low_version_circuit": { "params_path": "",
"params_path": "params", "assets_path": ""
"assets_path": "assets", },
"fork_name": "darwin", "max_verifier_workers": 4,
"min_prover_version": "v4.4.43" "min_prover_version": "v1.0.0"
},
"high_version_circuit": {
"params_path": "params",
"assets_path": "assets",
"fork_name": "darwinV2",
"min_prover_version": "v4.4.45"
}
}
}, },
"db": { "db": {
"driver_name": "postgres", "driver_name": "postgres",
@@ -32,7 +24,7 @@
}, },
"auth": { "auth": {
"secret": "prover secret key", "secret": "prover secret key",
"challenge_expire_duration_sec": 3600, "challenge_expire_duration_sec": 10,
"login_expire_duration_sec": 3600 "login_expire_duration_sec": 3600
} }
} }

View File

@@ -6,11 +6,8 @@ require (
github.com/appleboy/gin-jwt/v2 v2.9.1 github.com/appleboy/gin-jwt/v2 v2.9.1
github.com/gin-gonic/gin v1.9.1 github.com/gin-gonic/gin v1.9.1
github.com/go-resty/resty/v2 v2.7.0 github.com/go-resty/resty/v2 v2.7.0
github.com/google/uuid v1.6.0
github.com/mitchellh/mapstructure v1.5.0 github.com/mitchellh/mapstructure v1.5.0
github.com/prometheus/client_golang v1.19.0 github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/scroll-tech/da-codec v0.0.0-20240819100936-c6af3bbe7068
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6
github.com/shopspring/decimal v1.3.1 github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.9.0 github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7 github.com/urfave/cli/v2 v2.25.7
@@ -40,14 +37,20 @@ require (
github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect github.com/ugorji/go/codec v1.2.11 // indirect
golang.org/x/net v0.21.0 // indirect golang.org/x/net v0.20.0 // indirect
golang.org/x/text v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect google.golang.org/protobuf v1.33.0 // indirect
) )
require (
github.com/google/uuid v1.6.0
github.com/prometheus/client_golang v1.19.0
github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570
)
require ( require (
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect github.com/bits-and-blooms/bitset v1.12.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/chenzhuoyu/iasm v0.9.0 // indirect github.com/chenzhuoyu/iasm v0.9.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/bavard v0.1.13 // indirect
@@ -55,27 +58,27 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect github.com/crate-crypto/go-kzg-4844 v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/ethereum/c-kzg-4844 v1.0.2 // indirect github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect github.com/go-stack/stack v1.8.1 // indirect
github.com/holiman/uint256 v1.2.4 // indirect github.com/holiman/uint256 v1.2.4 // indirect
github.com/iden3/go-iden3-crypto v0.0.16 // indirect github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.8.4 // indirect github.com/scroll-tech/zktrie v0.8.2 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/supranational/blst v0.3.12 // indirect github.com/supranational/blst v0.3.11 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.8.0 // indirect github.com/tklauser/numcpus v0.6.1 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect
golang.org/x/crypto v0.24.0 // indirect golang.org/x/crypto v0.19.0 // indirect
golang.org/x/sync v0.7.0 // indirect golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.21.0 // indirect golang.org/x/sys v0.17.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect rsc.io/tmplfunc v0.0.3 // indirect
) )

View File

@@ -7,8 +7,8 @@ github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.12.0 h1:U/q1fAF7xXRhFCrhROzIfffYnu+dlS38vCZtmFVPHmA=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -43,8 +43,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/ethereum/c-kzg-4844 v1.0.2 h1:8tV84BCEiPeOkiVgW9mpYBeBUir2bkCNVqxPwwVeO+s= github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
github.com/ethereum/c-kzg-4844 v1.0.2/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
@@ -96,8 +96,8 @@ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iU
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/iden3/go-iden3-crypto v0.0.16 h1:zN867xiz6HgErXVIV/6WyteGcOukE9gybYTorBMEdsk= github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.16/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
@@ -173,12 +173,12 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.0.0-20240819100936-c6af3bbe7068 h1:oVGwhg4cCq35B04eG/S4OBXDwXiFH7+LezuH2ZTRBPs= github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570 h1:2oA2bAFPQXDZcUK8TA9qd5zj6AsURpHyBaAha5goP0c=
github.com/scroll-tech/da-codec v0.0.0-20240819100936-c6af3bbe7068/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= github.com/scroll-tech/da-codec v0.0.0-20240429123441-4aaf9d35e570/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/scroll-tech/zktrie v0.8.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
@@ -196,8 +196,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhUUbZ8= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw= github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
@@ -206,10 +206,10 @@ github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
@@ -221,8 +221,8 @@ github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6S
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y= golang.org/x/arch v0.5.0 h1:jpGode6huXQxcskEIpOCvrU+tzo81b6+oFLUYXWtH/Y=
golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.5.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
@@ -232,8 +232,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -243,13 +243,13 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -265,8 +265,10 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
@@ -275,8 +277,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=

View File

@@ -6,7 +6,6 @@ import (
"path/filepath" "path/filepath"
"scroll-tech/common/database" "scroll-tech/common/database"
"scroll-tech/common/utils"
) )
// ProverManager loads sequencer configuration items. // ProverManager loads sequencer configuration items.
@@ -22,8 +21,10 @@ type ProverManager struct {
BatchCollectionTimeSec int `json:"batch_collection_time_sec"` BatchCollectionTimeSec int `json:"batch_collection_time_sec"`
// ChunkCollectionTimeSec chunk Proof collection time (in seconds). // ChunkCollectionTimeSec chunk Proof collection time (in seconds).
ChunkCollectionTimeSec int `json:"chunk_collection_time_sec"` ChunkCollectionTimeSec int `json:"chunk_collection_time_sec"`
// BundleCollectionTimeSec bundle Proof collection time (in seconds). // Max number of workers in verifier worker pool
BundleCollectionTimeSec int `json:"bundle_collection_time_sec"` MaxVerifierWorkers int `json:"max_verifier_workers"`
// MinProverVersion is the minimum version of the prover that is required.
MinProverVersion string `json:"min_prover_version"`
} }
// L2 loads l2geth configuration items. // L2 loads l2geth configuration items.
@@ -47,19 +48,12 @@ type Config struct {
Auth *Auth `json:"auth"` Auth *Auth `json:"auth"`
} }
// CircuitConfig circuit items.
type CircuitConfig struct {
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
ForkName string `json:"fork_name"`
MinProverVersion string `json:"min_prover_version"`
}
// VerifierConfig load zk verifier config. // VerifierConfig load zk verifier config.
type VerifierConfig struct { type VerifierConfig struct {
MockMode bool `json:"mock_mode"` ForkName string `json:"fork_name"`
LowVersionCircuit *CircuitConfig `json:"low_version_circuit"` MockMode bool `json:"mock_mode"`
HighVersionCircuit *CircuitConfig `json:"high_version_circuit"` ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
} }
// NewConfig returns a new instance of Config. // NewConfig returns a new instance of Config.
@@ -75,11 +69,5 @@ func NewConfig(file string) (*Config, error) {
return nil, err return nil, err
} }
// Override config with environment variables
err = utils.OverrideConfigWithEnv(cfg, "SCROLL_COORDINATOR")
if err != nil {
return nil, err
}
return cfg, nil return cfg, nil
} }

View File

@@ -1,16 +1,15 @@
package api package api
import ( import (
"errors"
"fmt" "fmt"
jwt "github.com/appleboy/gin-jwt/v2" jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/coordinator/internal/config" "scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/logic/auth" "scroll-tech/coordinator/internal/logic/auth"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/types" "scroll-tech/coordinator/internal/types"
) )
@@ -20,9 +19,9 @@ type AuthController struct {
} }
// NewAuthController returns an LoginController instance // NewAuthController returns an LoginController instance
func NewAuthController(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *AuthController { func NewAuthController(db *gorm.DB) *AuthController {
return &AuthController{ return &AuthController{
loginLogic: auth.NewLoginLogic(db, cfg, vf), loginLogic: auth.NewLoginLogic(db),
} }
} }
@@ -37,43 +36,61 @@ func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
// if not exist, the jwt token will intercept it // if not exist, the jwt token will intercept it
brearToken := c.GetHeader("Authorization") brearToken := c.GetHeader("Authorization")
if brearToken != "Bearer "+login.Message.Challenge { if brearToken != "Bearer "+login.Message.Challenge {
return "", errors.New("check challenge failure for the not equal challenge string") return "", fmt.Errorf("check challenge failure for the not equal challenge string")
}
if err := a.loginLogic.Check(&login); err != nil {
return "", fmt.Errorf("check the login parameter failure: %w", err)
}
hardForkNames, err := a.loginLogic.ProverHardForkName(&login)
if err != nil {
return "", fmt.Errorf("prover hard name failure:%w", err)
} }
// check the challenge is used, if used, return failure // check the challenge is used, if used, return failure
if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil { if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
return "", fmt.Errorf("login insert challenge string failure:%w", err) return "", fmt.Errorf("login insert challenge string failure:%w", err)
} }
return login, nil
returnData := types.LoginParameterWithHardForkName{
HardForkName: hardForkNames,
LoginParameter: login,
}
return returnData, nil
} }
// PayloadFunc returns jwt.MapClaims with {public key, prover name}. // PayloadFunc returns jwt.MapClaims with {public key, prover name}.
func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims { func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
v, ok := data.(types.LoginParameterWithHardForkName) v, ok := data.(types.LoginParameter)
if !ok { if !ok {
return jwt.MapClaims{} return jwt.MapClaims{}
} }
var publicKey string
var err error
if v.Message.HardForkName != "" {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
HardForkName: v.Message.HardForkName,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
} else {
authMsg := message.LegacyAuthMsg{
Identity: &message.LegacyIdentity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
}
if err != nil {
return jwt.MapClaims{}
}
if v.Message.HardForkName == "" {
v.Message.HardForkName = "shanghai"
}
return jwt.MapClaims{ return jwt.MapClaims{
types.HardForkName: v.HardForkName, types.PublicKey: publicKey,
types.PublicKey: v.PublicKey,
types.ProverName: v.Message.ProverName, types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion, types.ProverVersion: v.Message.ProverVersion,
types.HardForkName: v.Message.HardForkName,
} }
} }
@@ -95,6 +112,5 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
if hardForkName, ok := claims[types.HardForkName]; ok { if hardForkName, ok := claims[types.HardForkName]; ok {
c.Set(types.HardForkName, hardForkName) c.Set(types.HardForkName, hardForkName)
} }
return nil return nil
} }

View File

@@ -26,9 +26,9 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
panic("proof receiver new verifier failure") panic("proof receiver new verifier failure")
} }
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap, "bundleVerifier", vf.BundleVkMap) log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)
Auth = NewAuthController(db, cfg, vf) Auth = NewAuthController(db)
GetTask = NewGetTaskController(cfg, chainCfg, db, reg) GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, chainCfg, db, vf, reg) SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
} }

View File

@@ -1,7 +1,6 @@
package api package api
import ( import (
"errors"
"fmt" "fmt"
"math/rand" "math/rand"
@@ -17,6 +16,7 @@ import (
"scroll-tech/coordinator/internal/config" "scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provertask" "scroll-tech/coordinator/internal/logic/provertask"
"scroll-tech/coordinator/internal/logic/verifier"
coordinatorType "scroll-tech/coordinator/internal/types" coordinatorType "scroll-tech/coordinator/internal/types"
) )
@@ -28,10 +28,9 @@ type GetTaskController struct {
} }
// NewGetTaskController create a get prover task controller // NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *GetTaskController { func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, reg) chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, reg) batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg)
bundleProverTask := provertask.NewBundleProverTask(cfg, chainCfg, db, reg)
ptc := &GetTaskController{ ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask), proverTasks: make(map[message.ProofType]provertask.ProverTask),
@@ -43,22 +42,22 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
ptc.proverTasks[message.ProofTypeBatch] = batchProverTask ptc.proverTasks[message.ProofTypeBatch] = batchProverTask
ptc.proverTasks[message.ProofTypeBundle] = bundleProverTask
return ptc return ptc
} }
func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error { func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey) publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist { if !publicKeyExist {
return errors.New("get public key from context failed") return fmt.Errorf("get public key from context failed")
} }
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName) proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist { if !proverNameExist {
return errors.New("get prover name from context failed") return fmt.Errorf("get prover name from context failed")
} }
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion) proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist { if !proverVersionExist {
return errors.New("get prover version from context failed") return fmt.Errorf("get prover version from context failed")
} }
ptc.getTaskAccessCounter.With(prometheus.Labels{ ptc.getTaskAccessCounter.With(prometheus.Labels{
@@ -98,7 +97,7 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
} }
if result == nil { if result == nil {
nerr := errors.New("get empty prover task") nerr := fmt.Errorf("get empty prover task")
types.RenderFailure(ctx, types.ErrCoordinatorEmptyProofData, nerr) types.RenderFailure(ctx, types.ErrCoordinatorEmptyProofData, nerr)
return return
} }
@@ -107,21 +106,18 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
} }
func (ptc *GetTaskController) proofType(para *coordinatorType.GetTaskParameter) message.ProofType { func (ptc *GetTaskController) proofType(para *coordinatorType.GetTaskParameter) message.ProofType {
var proofTypes []message.ProofType proofType := message.ProofType(para.TaskType)
for _, proofType := range para.TaskTypes {
proofTypes = append(proofTypes, message.ProofType(proofType)) proofTypes := []message.ProofType{
message.ProofTypeChunk,
message.ProofTypeBatch,
} }
if len(proofTypes) == 0 { if proofType == message.ProofTypeUndefined {
proofTypes = []message.ProofType{ rand.Shuffle(len(proofTypes), func(i, j int) {
message.ProofTypeChunk, proofTypes[i], proofTypes[j] = proofTypes[j], proofTypes[i]
message.ProofTypeBatch, })
message.ProofTypeBundle, proofType = proofTypes[0]
}
} }
return proofType
rand.Shuffle(len(proofTypes), func(i, j int) {
proofTypes[i], proofTypes[j] = proofTypes[j], proofTypes[i]
})
return proofTypes[0]
} }

View File

@@ -1,14 +1,15 @@
package api package api
import ( import (
"encoding/json"
"fmt" "fmt"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config" "scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/submitproof" "scroll-tech/coordinator/internal/logic/submitproof"
@@ -22,9 +23,9 @@ type SubmitProofController struct {
} }
// NewSubmitProofController create the submit proof api controller instance // NewSubmitProofController create the submit proof api controller instance
func NewSubmitProofController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *SubmitProofController { func NewSubmitProofController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *SubmitProofController {
return &SubmitProofController{ return &SubmitProofController{
submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, chainCfg, db, vf, reg), submitProofReceiverLogic: submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db, vf, reg),
} }
} }
@@ -37,7 +38,36 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
return return
} }
if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, spp); err != nil { proofMsg := message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: spp.TaskID,
Type: message.ProofType(spp.TaskType),
Status: message.RespStatus(spp.Status),
},
}
if spp.Status == int(message.StatusOk) {
switch message.ProofType(spp.TaskType) {
case message.ProofTypeChunk:
var tmpChunkProof message.ChunkProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
proofMsg.ChunkProof = &tmpChunkProof
case message.ProofTypeBatch:
var tmpBatchProof message.BatchProof
if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorParameterInvalidNo, nerr)
return
}
proofMsg.BatchProof = &tmpBatchProof
}
}
if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {
nerr := fmt.Errorf("handle zk proof failure, err:%w", err) nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
types.RenderFailure(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr) types.RenderFailure(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr)
return return

View File

@@ -23,55 +23,38 @@ type Collector struct {
db *gorm.DB db *gorm.DB
ctx context.Context ctx context.Context
stopBundleTimeoutChan chan struct{} stopChunkTimeoutChan chan struct{}
stopChunkTimeoutChan chan struct{} stopBatchTimeoutChan chan struct{}
stopBatchTimeoutChan chan struct{} stopBatchAllChunkReadyChan chan struct{}
stopBatchAllChunkReadyChan chan struct{} stopCleanChallengeChan chan struct{}
stopBundleAllBatchReadyChan chan struct{}
stopCleanChallengeChan chan struct{}
proverTaskOrm *orm.ProverTask proverTaskOrm *orm.ProverTask
bundleOrm *orm.Bundle
chunkOrm *orm.Chunk chunkOrm *orm.Chunk
batchOrm *orm.Batch batchOrm *orm.Batch
challenge *orm.Challenge challenge *orm.Challenge
timeoutBundleCheckerRunTotal prometheus.Counter timeoutBatchCheckerRunTotal prometheus.Counter
bundleProverTaskTimeoutTotal prometheus.Counter batchProverTaskTimeoutTotal prometheus.Counter
timeoutBatchCheckerRunTotal prometheus.Counter timeoutChunkCheckerRunTotal prometheus.Counter
batchProverTaskTimeoutTotal prometheus.Counter chunkProverTaskTimeoutTotal prometheus.Counter
timeoutChunkCheckerRunTotal prometheus.Counter checkBatchAllChunkReadyRunTotal prometheus.Counter
chunkProverTaskTimeoutTotal prometheus.Counter
checkBatchAllChunkReadyRunTotal prometheus.Counter
checkBundleAllBatchReadyRunTotal prometheus.Counter
} }
// NewCollector create a collector to cron collect the data to send to prover // NewCollector create a collector to cron collect the data to send to prover
func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prometheus.Registerer) *Collector { func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prometheus.Registerer) *Collector {
c := &Collector{ c := &Collector{
cfg: cfg, cfg: cfg,
db: db, db: db,
ctx: ctx, ctx: ctx,
stopBundleTimeoutChan: make(chan struct{}), stopChunkTimeoutChan: make(chan struct{}),
stopChunkTimeoutChan: make(chan struct{}), stopBatchTimeoutChan: make(chan struct{}),
stopBatchTimeoutChan: make(chan struct{}), stopBatchAllChunkReadyChan: make(chan struct{}),
stopBatchAllChunkReadyChan: make(chan struct{}), stopCleanChallengeChan: make(chan struct{}),
stopBundleAllBatchReadyChan: make(chan struct{}), proverTaskOrm: orm.NewProverTask(db),
stopCleanChallengeChan: make(chan struct{}), chunkOrm: orm.NewChunk(db),
proverTaskOrm: orm.NewProverTask(db), batchOrm: orm.NewBatch(db),
chunkOrm: orm.NewChunk(db), challenge: orm.NewChallenge(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
challenge: orm.NewChallenge(db),
timeoutBundleCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_bundle_timeout_checker_run_total",
Help: "Total number of bundle timeout checker run.",
}),
bundleProverTaskTimeoutTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_bundle_prover_task_timeout_total",
Help: "Total number of bundle timeout prover task.",
}),
timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_timeout_checker_run_total", Name: "coordinator_batch_timeout_checker_run_total",
Help: "Total number of batch timeout checker run.", Help: "Total number of batch timeout checker run.",
@@ -92,17 +75,11 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
Name: "coordinator_check_batch_all_chunk_ready_run_total", Name: "coordinator_check_batch_all_chunk_ready_run_total",
Help: "Total number of check batch all chunks ready total", Help: "Total number of check batch all chunks ready total",
}), }),
checkBundleAllBatchReadyRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_check_bundle_all_batch_ready_run_total",
Help: "Total number of check bundle all batches ready total",
}),
} }
go c.timeoutBundleProofTask()
go c.timeoutBatchProofTask() go c.timeoutBatchProofTask()
go c.timeoutChunkProofTask() go c.timeoutChunkProofTask()
go c.checkBatchAllChunkReady() go c.checkBatchAllChunkReady()
go c.checkBundleAllBatchReady()
go c.cleanupChallenge() go c.cleanupChallenge()
log.Info("Start coordinator cron successfully.") log.Info("Start coordinator cron successfully.")
@@ -114,45 +91,10 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
func (c *Collector) Stop() { func (c *Collector) Stop() {
c.stopChunkTimeoutChan <- struct{}{} c.stopChunkTimeoutChan <- struct{}{}
c.stopBatchTimeoutChan <- struct{}{} c.stopBatchTimeoutChan <- struct{}{}
c.stopBundleTimeoutChan <- struct{}{}
c.stopBatchAllChunkReadyChan <- struct{}{} c.stopBatchAllChunkReadyChan <- struct{}{}
c.stopCleanChallengeChan <- struct{}{} c.stopCleanChallengeChan <- struct{}{}
} }
// timeoutBundleProofTask cron checks the send task is timeout. if timeout reached, restore the
// bundle task to unassigned. then the bundle collector can retry it.
func (c *Collector) timeoutBundleProofTask() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("timeout bundle proof task panic error:%v", err)
log.Warn(nerr.Error())
}
}()
ticker := time.NewTicker(time.Second * 2)
for {
select {
case <-ticker.C:
c.timeoutBundleCheckerRunTotal.Inc()
timeout := time.Duration(c.cfg.ProverManager.BundleCollectionTimeSec) * time.Second
assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, message.ProofTypeBundle, timeout)
if err != nil {
log.Error("get unassigned session info failure", "error", err)
break
}
c.check(assignedProverTasks, c.bundleProverTaskTimeoutTotal)
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopBundleTimeoutChan:
log.Info("the coordinator timeoutBundleProofTask run loop exit")
return
}
}
}
// timeoutBatchProofTask cron check the send task is timeout. if timeout reached, restore the // timeoutBatchProofTask cron check the send task is timeout. if timeout reached, restore the
// chunk/batch task to unassigned. then the batch/chunk collector can retry it. // chunk/batch task to unassigned. then the batch/chunk collector can retry it.
func (c *Collector) timeoutBatchProofTask() { func (c *Collector) timeoutBatchProofTask() {
@@ -260,16 +202,6 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
log.Error("update proving status failed failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err) log.Error("update proving status failed failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err return err
} }
case message.ProofTypeBundle:
if err := c.bundleOrm.DecreaseActiveAttemptsByHash(c.ctx, assignedProverTask.TaskID, tx); err != nil {
log.Error("decrease bundle active attempts failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
if err := c.bundleOrm.UpdateProvingStatusFailed(c.ctx, assignedProverTask.TaskID, c.cfg.ProverManager.SessionAttempts, tx); err != nil {
log.Error("update proving status failed failure", "uuid", assignedProverTask.UUID, "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
} }
return nil return nil
@@ -336,60 +268,3 @@ func (c *Collector) checkBatchAllChunkReady() {
} }
} }
} }
func (c *Collector) checkBundleAllBatchReady() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("check batch all batches ready panic error:%v", err)
log.Warn(nerr.Error())
}
}()
ticker := time.NewTicker(time.Second * 10)
for {
select {
case <-ticker.C:
c.checkBundleAllBatchReadyRunTotal.Inc()
page := 1
pageSize := 50
for {
offset := (page - 1) * pageSize
bundles, err := c.bundleOrm.GetUnassignedAndBatchesUnreadyBundles(c.ctx, offset, pageSize)
if err != nil {
log.Warn("checkBundleAllBatchReady GetUnassignedAndBatchesUnreadyBundles", "error", err)
break
}
for _, bundle := range bundles {
allReady, checkErr := c.batchOrm.CheckIfBundleBatchProofsAreReady(c.ctx, bundle.Hash)
if checkErr != nil {
log.Warn("checkBundleAllBatchReady CheckIfBundleBatchProofsAreReady failure", "error", checkErr, "hash", bundle.Hash)
continue
}
if !allReady {
continue
}
if updateErr := c.bundleOrm.UpdateBatchProofsStatusByBundleHash(c.ctx, bundle.Hash, types.BatchProofsStatusReady); updateErr != nil {
log.Warn("checkBundleAllBatchReady UpdateBatchProofsStatusByBundleHash failure", "error", checkErr, "hash", bundle.Hash)
}
}
if len(bundles) < pageSize {
break
}
page++
}
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopBundleAllBatchReadyChan:
log.Info("the coordinator checkBundleAllBatchReady run loop exit")
return
}
}
}

View File

@@ -1,57 +1,21 @@
package auth package auth
import ( import (
"errors"
"fmt"
"strings"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm" "scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/internal/types"
) )
// LoginLogic the auth logic // LoginLogic the auth logic
type LoginLogic struct { type LoginLogic struct {
cfg *config.Config
challengeOrm *orm.Challenge challengeOrm *orm.Challenge
chunkVks map[string]struct{}
batchVKs map[string]struct{}
bundleVks map[string]struct{}
proverVersionHardForkMap map[string][]string
} }
// NewLoginLogic new a LoginLogic // NewLoginLogic new a LoginLogic
func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *LoginLogic { func NewLoginLogic(db *gorm.DB) *LoginLogic {
proverVersionHardForkMap := make(map[string][]string)
if version.CheckScrollRepoVersion(cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion) {
log.Error("config file error, low verifier min_prover_version should not more than high verifier min_prover_version",
"low verifier min_prover_version", cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion,
"high verifier min_prover_version", cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion)
panic("verifier config file error")
}
var highHardForks []string
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
highHardForks = append(highHardForks, cfg.ProverManager.Verifier.LowVersionCircuit.ForkName)
proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks
proverVersionHardForkMap[cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion] = []string{cfg.ProverManager.Verifier.LowVersionCircuit.ForkName}
return &LoginLogic{ return &LoginLogic{
cfg: cfg, challengeOrm: orm.NewChallenge(db),
chunkVks: vf.ChunkVKMap,
batchVKs: vf.BatchVKMap,
bundleVks: vf.BundleVkMap,
challengeOrm: orm.NewChallenge(db),
proverVersionHardForkMap: proverVersionHardForkMap,
} }
} }
@@ -59,67 +23,3 @@ func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *Logi
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error { func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge) return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
} }
func (l *LoginLogic) Check(login *types.LoginParameter) error {
verify, err := login.Verify()
if err != nil || !verify {
log.Error("auth message verify failure", "prover_name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
return errors.New("auth message verify failure")
}
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, login.Message.ProverVersion)
}
if len(login.Message.ProverTypes) > 0 {
vks := make(map[string]struct{})
for _, proverType := range login.Message.ProverTypes {
switch proverType {
case types.ProverTypeChunk:
for vk := range l.chunkVks {
vks[vk] = struct{}{}
}
case types.ProverTypeBatch:
for vk := range l.batchVKs {
vks[vk] = struct{}{}
}
for vk := range l.bundleVks {
vks[vk] = struct{}{}
}
default:
log.Error("invalid prover_type", "value", proverType, "prover name", login.Message.ProverName, "prover_version", login.Message.ProverVersion)
}
}
for _, vk := range login.Message.VKs {
if _, ok := vks[vk]; !ok {
log.Error("vk inconsistency", "prover vk", vk, "prover name", login.Message.ProverName,
"prover_version", login.Message.ProverVersion, "message", login.Message)
if !version.CheckScrollProverVersion(login.Message.ProverVersion) {
return fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s",
version.Version, login.Message.ProverVersion)
}
// if the prover reports a same prover version
return errors.New("incompatible vk. please check your params files or config files")
}
}
}
return nil
}
// ProverHardForkName retrieves hard fork name which prover belongs to
func (l *LoginLogic) ProverHardForkName(login *types.LoginParameter) (string, error) {
proverVersionSplits := strings.Split(login.Message.ProverVersion, "-")
if len(proverVersionSplits) == 0 {
return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
}
proverVersion := proverVersionSplits[0]
if hardForkNames, ok := l.proverVersionHardForkMap[proverVersion]; ok {
return strings.Join(hardForkNames, ","), nil
}
return "", fmt.Errorf("invalid prover prover_version:%s", login.Message.ProverVersion)
}
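For context, a minimal standalone sketch of the vk-consistency idea in the removed Check method above: collect every verifying key the coordinator accepts (chunk, batch and bundle circuits) and reject a login whose reported VKs are not in that set. The maps, VK strings and error text below are simplified stand-ins, not the coordinator's actual types.

package main

import "fmt"

// checkVKs mirrors the idea of the removed LoginLogic.Check: gather every
// verifying key the coordinator knows about and reject a login whose
// reported VKs are not in that set.
func checkVKs(chunkVKs, batchVKs, bundleVKs map[string]struct{}, reportedVKs []string) error {
	allowed := make(map[string]struct{})
	for _, m := range []map[string]struct{}{chunkVKs, batchVKs, bundleVKs} {
		for vk := range m {
			allowed[vk] = struct{}{}
		}
	}
	for _, vk := range reportedVKs {
		if _, ok := allowed[vk]; !ok {
			return fmt.Errorf("incompatible vk %q, please check your params files or config files", vk)
		}
	}
	return nil
}

func main() {
	chunk := map[string]struct{}{"vk-chunk": {}}
	batch := map[string]struct{}{"vk-batch": {}}
	bundle := map[string]struct{}{"vk-bundle": {}}
	fmt.Println(checkVKs(chunk, batch, bundle, []string{"vk-chunk", "vk-batch"})) // <nil>
	fmt.Println(checkVKs(chunk, batch, bundle, []string{"vk-unknown"}))           // incompatible vk
}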

View File

@@ -3,15 +3,14 @@ package provertask
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"math"
"time" "time"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/da-codec/encoding/codecv3"
"github.com/scroll-tech/da-codec/encoding/codecv4"
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/params"
@@ -31,23 +30,33 @@ import (
type BatchProverTask struct { type BatchProverTask struct {
BaseProverTask BaseProverTask
batchTaskGetTaskTotal *prometheus.CounterVec batchAttemptsExceedTotal prometheus.Counter
batchTaskGetTaskProver *prometheus.CounterVec batchTaskGetTaskTotal *prometheus.CounterVec
batchTaskGetTaskProver *prometheus.CounterVec
} }
// NewBatchProverTask new a batch collector // NewBatchProverTask new a batch collector
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProverTask { func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
bp := &BatchProverTask{ bp := &BatchProverTask{
BaseProverTask: BaseProverTask{ BaseProverTask: BaseProverTask{
vkMap: vkMap,
reverseVkMap: reverseMap(vkMap),
db: db, db: db,
cfg: cfg, cfg: cfg,
chainCfg: chainCfg, nameForkMap: nameForkMap,
blockOrm: orm.NewL2Block(db), forkHeights: forkHeights,
chunkOrm: orm.NewChunk(db), chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db), batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db), proverTaskOrm: orm.NewProverTask(db),
proverBlockListOrm: orm.NewProverBlockList(db), proverBlockListOrm: orm.NewProverBlockList(db),
}, },
batchAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_batch_attempts_exceed_total",
Help: "Total number of batch attempts exceed.",
}),
batchTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ batchTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_batch_get_task_total", Name: "coordinator_batch_get_task_total",
Help: "Total number of batch get task.", Help: "Total number of batch get task.",
@@ -57,20 +66,38 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
return bp return bp
} }
// Assign load and assign batch tasks type chunkIndexRange struct {
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) { start uint64
taskCtx, err := bp.checkParameter(ctx) end uint64
if err != nil || taskCtx == nil { }
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
func (r *chunkIndexRange) merge(o chunkIndexRange) *chunkIndexRange {
var start, end = r.start, r.end
if o.start < r.start {
start = o.start
}
if o.end > r.end {
end = o.end
}
return &chunkIndexRange{start, end}
}
func (r *chunkIndexRange) contains(start, end uint64) bool {
return r.start <= start && r.end > end
}
type getHardForkNameByBatchFunc func(*orm.Batch) (string, error)
func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCtx *proverTaskContext,
chunkRange *chunkIndexRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByBatchFunc) (*coordinatorType.GetTaskSchema, error) {
startChunkIndex, endChunkIndex := chunkRange.start, chunkRange.end
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
var batchTask *orm.Batch var batchTask *orm.Batch
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
var getTaskError error var getTaskError error
var tmpBatchTask *orm.Batch var tmpBatchTask *orm.Batch
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts) tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil { if getTaskError != nil {
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError) log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure return nil, ErrCoordinatorInternalFailure
@@ -79,7 +106,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned` // Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
// batch to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql. // batch to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
if tmpBatchTask == nil { if tmpBatchTask == nil {
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), maxActiveAttempts, maxTotalAttempts) tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil { if getTaskError != nil {
log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError) log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure return nil, ErrCoordinatorInternalFailure
@@ -112,32 +139,28 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
} }
log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName) log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
var (
hardForkName, getHardForkErr := bp.hardForkName(ctx, batchTask) proverVersion = taskCtx.ProverVersion
if getHardForkErr != nil { hardForkName = taskCtx.HardForkName
bp.recoverActiveAttempts(ctx, batchTask) )
log.Error("retrieve hard fork name by batch failed", "task_id", batchTask.Hash, "err", getHardForkErr) var err error
return nil, ErrCoordinatorInternalFailure if getHardForkName != nil {
hardForkName, err = getHardForkName(batchTask)
if err != nil {
log.Error("failed to get hard fork name by batch", "task_id", batchTask.Hash, "error", err.Error())
return nil, ErrCoordinatorInternalFailure
}
} }
//if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
// bp.recoverActiveAttempts(ctx, batchTask)
// log.Error("incompatible prover version",
// "requisite hard fork name", hardForkName,
// "prover hard fork name", taskCtx.HardForkNames,
// "task_id", batchTask.Hash)
// return nil, ErrCoordinatorInternalFailure
//}
proverTask := orm.ProverTask{ proverTask := orm.ProverTask{
TaskID: batchTask.Hash, TaskID: batchTask.Hash,
ProverPublicKey: taskCtx.PublicKey, ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBatch), TaskType: int16(message.ProofTypeBatch),
ProverName: taskCtx.ProverName, ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion, ProverVersion: proverVersion,
ProvingStatus: int16(types.ProverAssigned), ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined), FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go // here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(), AssignedAt: utils.NowUTC(),
} }
@@ -148,7 +171,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure return nil, ErrCoordinatorInternalFailure
} }
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, batchTask, hardForkName) taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil { if err != nil {
bp.recoverActiveAttempts(ctx, batchTask) bp.recoverActiveAttempts(ctx, batchTask)
log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err) log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
@@ -165,21 +188,113 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil return taskMsg, nil
} }
func (bp *BatchProverTask) hardForkName(ctx *gin.Context, batchTask *orm.Batch) (string, error) { func (bp *BatchProverTask) getChunkRangeByName(ctx *gin.Context, hardForkName string) (*chunkIndexRange, error) {
startChunk, getChunkErr := bp.chunkOrm.GetChunkByHash(ctx, batchTask.StartChunkHash) hardForkNumber, err := bp.getHardForkNumberByName(hardForkName)
if getChunkErr != nil { if err != nil {
return "", getChunkErr // log.Error("batch assign failure because of the hard fork name don't exist", "fork name", hardForkName)
return nil, err
} }
l2Block, getBlockErr := bp.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber) // if the hard fork number set, rollup relayer must generate the chunk from hard fork number,
if getBlockErr != nil { // so the hard fork chunk's start_block_number must be ForkBlockNumber
return "", getBlockErr var startChunkIndex uint64 = 0
var endChunkIndex uint64 = math.MaxInt64
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", hardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
return nil, nil
}
startChunkIndex = startChunk.Index
} }
hardForkName := forks.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp) if toBlockNum != math.MaxInt64 {
return hardForkName, nil toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", hardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
// toChunk being nil only indicates that we haven't yet reached the fork boundary
// don't need change the endChunkIndex of math.MaxInt64
endChunkIndex = toChunk.Index
}
}
return &chunkIndexRange{startChunkIndex, endChunkIndex}, nil
} }
func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, batch *orm.Batch, hardForkName string) (*coordinatorType.GetTaskSchema, error) { func (bp *BatchProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
chunkRange, err := bp.getChunkRangeByName(ctx, taskCtx.HardForkName)
if err != nil {
return nil, err
}
if chunkRange == nil {
return nil, nil
}
return bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, nil)
}
func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
var (
hardForkNames [2]string
chunkRanges [2]*chunkIndexRange
err error
)
var chunkRange *chunkIndexRange
for i := 0; i < 2; i++ {
hardForkNames[i] = bp.reverseVkMap[getTaskParameter.VKs[i]]
chunkRanges[i], err = bp.getChunkRangeByName(ctx, hardForkNames[i])
if err == nil && chunkRanges[i] != nil {
if chunkRange == nil {
chunkRange = chunkRanges[i]
} else {
chunkRange = chunkRange.merge(*chunkRanges[i])
}
}
}
if chunkRange == nil {
log.Error("chunkRange empty")
return nil, errors.New("chunkRange empty")
}
var hardForkName string
getHardForkName := func(batch *orm.Batch) (string, error) {
for i := 0; i < 2; i++ {
if chunkRanges[i] != nil && chunkRanges[i].contains(batch.StartChunkIndex, batch.EndChunkIndex) {
hardForkName = hardForkNames[i]
break
}
}
if hardForkName == "" {
log.Warn("get batch not belongs to any hard fork name", "batch id", batch.Index)
return "", fmt.Errorf("get batch not belongs to any hard fork name, batch id: %d", batch.Index)
}
return hardForkName, nil
}
schema, err := bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, getHardForkName)
if schema != nil && err == nil {
schema.HardForkName = hardForkName
return schema, nil
}
return schema, err
}
// Assign load and assign batch tasks
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
if len(getTaskParameter.VKs) > 0 {
return bp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
}
return bp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
}
func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// get chunk from db // get chunk from db
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID) chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID)
if err != nil { if err != nil {
@@ -187,10 +302,6 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
return nil, err return nil, err
} }
if len(chunks) == 0 {
return nil, fmt.Errorf("no chunk found for batch task id:%s", task.TaskID)
}
var chunkProofs []*message.ChunkProof var chunkProofs []*message.ChunkProof
var chunkInfos []*message.ChunkInfo var chunkInfos []*message.ChunkInfo
for _, chunk := range chunks { for _, chunk := range chunks {
@@ -214,9 +325,9 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
chunkInfos = append(chunkInfos, &chunkInfo) chunkInfos = append(chunkInfos, &chunkInfo)
} }
taskDetail, err := bp.getBatchTaskDetail(batch, chunkInfos, chunkProofs) taskDetail := message.BatchTaskDetail{
if err != nil { ChunkInfos: chunkInfos,
return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", task.TaskID, err) ChunkProofs: chunkProofs,
} }
chunkProofsBytes, err := json.Marshal(taskDetail) chunkProofsBytes, err := json.Marshal(taskDetail)
@@ -225,11 +336,10 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
} }
taskMsg := &coordinatorType.GetTaskSchema{ taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(), UUID: task.UUID.String(),
TaskID: task.TaskID, TaskID: task.TaskID,
TaskType: int(message.ProofTypeBatch), TaskType: int(message.ProofTypeBatch),
TaskData: string(chunkProofsBytes), TaskData: string(chunkProofsBytes),
HardForkName: hardForkName,
} }
return taskMsg, nil return taskMsg, nil
} }
@@ -239,34 +349,3 @@ func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *or
log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err) log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err)
} }
} }
func (bp *BatchProverTask) getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []*message.ChunkProof) (*message.BatchTaskDetail, error) {
taskDetail := &message.BatchTaskDetail{
ChunkInfos: chunkInfos,
ChunkProofs: chunkProofs,
}
if encoding.CodecVersion(dbBatch.CodecVersion) != encoding.CodecV3 && encoding.CodecVersion(dbBatch.CodecVersion) != encoding.CodecV4 {
return taskDetail, nil
}
if encoding.CodecVersion(dbBatch.CodecVersion) == encoding.CodecV3 {
batchHeader, decodeErr := codecv3.NewDABatchFromBytes(dbBatch.BatchHeader)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode batch header (v3) for batch %d: %w", dbBatch.Index, decodeErr)
}
taskDetail.BatchHeader = batchHeader
taskDetail.BlobBytes = dbBatch.BlobBytes
} else {
batchHeader, decodeErr := codecv4.NewDABatchFromBytes(dbBatch.BatchHeader)
if decodeErr != nil {
return nil, fmt.Errorf("failed to decode batch header (v4) for batch %d: %w", dbBatch.Index, decodeErr)
}
taskDetail.BatchHeader = batchHeader
taskDetail.BlobBytes = dbBatch.BlobBytes
}
return taskDetail, nil
}
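A minimal standalone sketch of the chunkIndexRange helper introduced above, showing how two circuits' fork windows are merged into one half-open [start, end) query window and how contains picks the fork a batch belongs to. The concrete indices are made-up examples, not values from the diff.

package main

import "fmt"

// chunkIndexRange mirrors the helper added to the batch prover task: a
// half-open [start, end) window of chunk indices a prover may be assigned.
// merge widens the window so one query can cover both circuits' forks.
type chunkIndexRange struct {
	start, end uint64
}

func (r chunkIndexRange) merge(o chunkIndexRange) chunkIndexRange {
	out := r
	if o.start < out.start {
		out.start = o.start
	}
	if o.end > out.end {
		out.end = o.end
	}
	return out
}

func (r chunkIndexRange) contains(start, end uint64) bool {
	return r.start <= start && r.end > end
}

func main() {
	lowFork := chunkIndexRange{0, 100}    // chunks produced before the fork boundary
	highFork := chunkIndexRange{100, 500} // chunks produced after the fork boundary
	window := lowFork.merge(highFork)
	fmt.Println(window)                    // {0 500}
	fmt.Println(lowFork.contains(10, 20))  // true  -> batch belongs to the low fork
	fmt.Println(highFork.contains(10, 20)) // false
}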

View File

@@ -1,229 +0,0 @@
package provertask
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/forks"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// BundleProverTask is prover task implement for bundle proof
type BundleProverTask struct {
BaseProverTask
bundleTaskGetTaskTotal *prometheus.CounterVec
bundleTaskGetTaskProver *prometheus.CounterVec
}
// NewBundleProverTask new a bundle collector
func NewBundleProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProverTask {
bp := &BundleProverTask{
BaseProverTask: BaseProverTask{
db: db,
chainCfg: chainCfg,
cfg: cfg,
blockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
proverTaskOrm: orm.NewProverTask(db),
proverBlockListOrm: orm.NewProverBlockList(db),
},
bundleTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_bundle_get_task_total",
Help: "Total number of bundle get task.",
}, []string{"fork_name"}),
bundleTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "bundle"),
}
return bp
}
// Assign load and assign batch tasks
func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := bp.checkParameter(ctx)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
var bundleTask *orm.Bundle
for i := 0; i < 5; i++ {
var getTaskError error
var tmpBundleTask *orm.Bundle
tmpBundleTask, getTaskError = bp.bundleOrm.GetAssignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get assigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
// bundle to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
if tmpBundleTask == nil {
tmpBundleTask, getTaskError = bp.bundleOrm.GetUnassignedBundle(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil {
log.Error("failed to get unassigned bundle proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
}
if tmpBundleTask == nil {
log.Debug("get empty bundle", "height", getTaskParameter.ProverHeight)
return nil, nil
}
rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
return nil, ErrCoordinatorInternalFailure
}
if rowsAffected == 0 {
time.Sleep(100 * time.Millisecond)
continue
}
bundleTask = tmpBundleTask
break
}
if bundleTask == nil {
log.Debug("get empty unassigned bundle after retry 5 times", "height", getTaskParameter.ProverHeight)
return nil, nil
}
log.Info("start bundle proof generation session", "task index", bundleTask.Index, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
hardForkName, getHardForkErr := bp.hardForkName(ctx, bundleTask)
if getHardForkErr != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("retrieve hard fork name by bundle failed", "task_id", bundleTask.Hash, "err", getHardForkErr)
return nil, ErrCoordinatorInternalFailure
}
//if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
// bp.recoverActiveAttempts(ctx, bundleTask)
// log.Error("incompatible prover version",
// "requisite hard fork name", hardForkName,
// "prover hard fork name", taskCtx.HardForkNames,
// "task_id", bundleTask.Hash)
// return nil, ErrCoordinatorInternalFailure
//}
proverTask := orm.ProverTask{
TaskID: bundleTask.Hash,
ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeBundle),
ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(),
}
// Store session info.
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("insert bundle prover task info fail", "task_id", bundleTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
return nil, ErrCoordinatorInternalFailure
}
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask, hardForkName)
if err != nil {
bp.recoverActiveAttempts(ctx, bundleTask)
log.Error("format bundle prover task failure", "task_id", bundleTask.Hash, "err", err)
return nil, ErrCoordinatorInternalFailure
}
bp.bundleTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
bp.bundleTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
}).Inc()
return taskMsg, nil
}
func (bp *BundleProverTask) hardForkName(ctx *gin.Context, bundleTask *orm.Bundle) (string, error) {
startBatch, getBatchErr := bp.batchOrm.GetBatchByHash(ctx, bundleTask.StartBatchHash)
if getBatchErr != nil {
return "", getBatchErr
}
startChunk, getChunkErr := bp.chunkOrm.GetChunkByHash(ctx, startBatch.StartChunkHash)
if getChunkErr != nil {
return "", getChunkErr
}
l2Block, getBlockErr := bp.blockOrm.GetL2BlockByNumber(ctx.Copy(), startChunk.StartBlockNumber)
if getBlockErr != nil {
return "", getBlockErr
}
hardForkName := forks.GetHardforkName(bp.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
return hardForkName, nil
}
func (bp *BundleProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) {
// get bundle from db
batches, err := bp.batchOrm.GetBatchesByBundleHash(ctx, task.TaskID)
if err != nil {
err = fmt.Errorf("failed to get batch proofs for batch task id:%s err:%w ", task.TaskID, err)
return nil, err
}
if len(batches) == 0 {
return nil, fmt.Errorf("failed to get batch proofs for bundle task id:%s, no batch found", task.TaskID)
}
var batchProofs []*message.BatchProof
for _, batch := range batches {
var proof message.BatchProof
if encodeErr := json.Unmarshal(batch.Proof, &proof); encodeErr != nil {
return nil, fmt.Errorf("failed to unmarshal proof: %w, bundle hash: %v, batch hash: %v", encodeErr, task.TaskID, batch.Hash)
}
batchProofs = append(batchProofs, &proof)
}
taskDetail := message.BundleTaskDetail{
BatchProofs: batchProofs,
}
batchProofsBytes, err := json.Marshal(taskDetail)
if err != nil {
return nil, fmt.Errorf("failed to marshal batch proofs, taskID:%s err:%w", task.TaskID, err)
}
taskMsg := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(),
TaskID: task.TaskID,
TaskType: int(message.ProofTypeBundle),
TaskData: string(batchProofsBytes),
HardForkName: hardForkName,
}
return taskMsg, nil
}
func (bp *BundleProverTask) recoverActiveAttempts(ctx *gin.Context, bundleTask *orm.Bundle) {
if err := bp.bundleOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), bundleTask.Hash); err != nil {
log.Error("failed to recover bundle active attempts", "hash", bundleTask.Hash, "error", err)
}
}
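A minimal sketch of the lookup chain used by the deleted hardForkName method above: bundle -> its start batch -> that batch's start chunk -> the chunk's first L2 block, whose number and timestamp select the fork. The structs, maps, fork heights and the hardforkName stub below are simplified stand-ins for the orm types and forks.GetHardforkName.

package main

import "fmt"

// Simplified stand-ins for the database records walked by the deleted
// BundleProverTask.hardForkName.
type bundle struct{ startBatchHash string }
type batch struct{ startChunkHash string }
type chunk struct{ startBlockNumber uint64 }
type l2Block struct{ number, timestamp uint64 }

// hardforkName is a placeholder for forks.GetHardforkName(chainCfg, num, ts);
// the cut-over height of 100 is invented for the example.
func hardforkName(number, timestamp uint64) string {
	if number >= 100 {
		return "curie"
	}
	return "bernoulli"
}

func bundleHardForkName(b bundle, batches map[string]batch, chunks map[string]chunk, blocks map[uint64]l2Block) (string, error) {
	startBatch, ok := batches[b.startBatchHash]
	if !ok {
		return "", fmt.Errorf("batch %s not found", b.startBatchHash)
	}
	startChunk, ok := chunks[startBatch.startChunkHash]
	if !ok {
		return "", fmt.Errorf("chunk %s not found", startBatch.startChunkHash)
	}
	blk, ok := blocks[startChunk.startBlockNumber]
	if !ok {
		return "", fmt.Errorf("block %d not found", startChunk.startBlockNumber)
	}
	return hardforkName(blk.number, blk.timestamp), nil
}

func main() {
	batches := map[string]batch{"b0": {startChunkHash: "c0"}}
	chunks := map[string]chunk{"c0": {startBlockNumber: 120}}
	blocks := map[uint64]l2Block{120: {number: 120, timestamp: 1_700_000_000}}
	fmt.Println(bundleHardForkName(bundle{startBatchHash: "b0"}, batches, chunks, blocks)) // curie <nil>
}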

View File

@@ -3,6 +3,7 @@ package provertask
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"time" "time"
@@ -27,22 +28,32 @@ import (
type ChunkProverTask struct { type ChunkProverTask struct {
BaseProverTask BaseProverTask
chunkTaskGetTaskTotal *prometheus.CounterVec chunkAttemptsExceedTotal prometheus.Counter
chunkTaskGetTaskProver *prometheus.CounterVec chunkTaskGetTaskTotal *prometheus.CounterVec
chunkTaskGetTaskProver *prometheus.CounterVec
} }
// NewChunkProverTask new a chunk prover task // NewChunkProverTask new a chunk prover task
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProverTask { func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
cp := &ChunkProverTask{ cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{ BaseProverTask: BaseProverTask{
vkMap: vkMap,
reverseVkMap: reverseMap(vkMap),
db: db, db: db,
cfg: cfg, cfg: cfg,
chainCfg: chainCfg, nameForkMap: nameForkMap,
forkHeights: forkHeights,
chunkOrm: orm.NewChunk(db), chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db), blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db), proverTaskOrm: orm.NewProverTask(db),
proverBlockListOrm: orm.NewProverBlockList(db), proverBlockListOrm: orm.NewProverBlockList(db),
}, },
chunkAttemptsExceedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "coordinator_chunk_attempts_exceed_total",
Help: "Total number of chunk attempts exceed.",
}),
chunkTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ chunkTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_chunk_get_task_total", Name: "coordinator_chunk_get_task_total",
Help: "Total number of chunk get task.", Help: "Total number of chunk get task.",
@@ -52,11 +63,13 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
return cp return cp
} }
// Assign the chunk proof which need to prove type getHardForkNameByChunkFunc func(*orm.Chunk) (string, error)
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := cp.checkParameter(ctx) func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCtx *proverTaskContext,
if err != nil || taskCtx == nil { blockRange *blockRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByChunkFunc) (*coordinatorType.GetTaskSchema, error) {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err) fromBlockNum, toBlockNum := blockRange.from, blockRange.to
if toBlockNum > getTaskParameter.ProverHeight {
toBlockNum = getTaskParameter.ProverHeight + 1
} }
maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
@@ -65,7 +78,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
var getTaskError error var getTaskError error
var tmpChunkTask *orm.Chunk var tmpChunkTask *orm.Chunk
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight) tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil { if getTaskError != nil {
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError) log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure return nil, ErrCoordinatorInternalFailure
@@ -74,7 +87,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned` // Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
// chunk to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql. // chunk to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
if tmpChunkTask == nil { if tmpChunkTask == nil {
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight) tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
if getTaskError != nil { if getTaskError != nil {
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError) log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
return nil, ErrCoordinatorInternalFailure return nil, ErrCoordinatorInternalFailure
@@ -107,32 +120,28 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
} }
log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName) log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
var (
hardForkName, getHardForkErr := cp.hardForkName(ctx, chunkTask) proverVersion = taskCtx.ProverVersion
if getHardForkErr != nil { hardForkName = taskCtx.HardForkName
cp.recoverActiveAttempts(ctx, chunkTask) err error
log.Error("retrieve hard fork name by chunk failed", "task_id", chunkTask.Hash, "err", getHardForkErr) )
return nil, ErrCoordinatorInternalFailure if getHardForkName != nil {
hardForkName, err = getHardForkName(chunkTask)
if err != nil {
log.Error("failed to get hard fork name by chunk", "task_id", chunkTask.Hash, "error", err.Error())
return nil, ErrCoordinatorInternalFailure
}
} }
//if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
// cp.recoverActiveAttempts(ctx, chunkTask)
// log.Error("incompatible prover version",
// "requisite hard fork name", hardForkName,
// "prover hard fork name", taskCtx.HardForkNames,
// "task_id", chunkTask.Hash)
// return nil, ErrCoordinatorInternalFailure
//}
proverTask := orm.ProverTask{ proverTask := orm.ProverTask{
TaskID: chunkTask.Hash, TaskID: chunkTask.Hash,
ProverPublicKey: taskCtx.PublicKey, ProverPublicKey: taskCtx.PublicKey,
TaskType: int16(message.ProofTypeChunk), TaskType: int16(message.ProofTypeChunk),
ProverName: taskCtx.ProverName, ProverName: taskCtx.ProverName,
ProverVersion: taskCtx.ProverVersion, ProverVersion: proverVersion,
ProvingStatus: int16(types.ProverAssigned), ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined), FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/database/db.go // here why need use UTC time. see scroll/common/database/db.go
AssignedAt: utils.NowUTC(), AssignedAt: utils.NowUTC(),
} }
@@ -142,7 +151,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure return nil, ErrCoordinatorInternalFailure
} }
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask, hardForkName) taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
if err != nil { if err != nil {
cp.recoverActiveAttempts(ctx, chunkTask) cp.recoverActiveAttempts(ctx, chunkTask)
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err) log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
@@ -159,16 +168,106 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return taskMsg, nil return taskMsg, nil
} }
func (cp *ChunkProverTask) hardForkName(ctx *gin.Context, chunkTask *orm.Chunk) (string, error) { func (cp *ChunkProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
l2Block, getBlockErr := cp.blockOrm.GetL2BlockByNumber(ctx.Copy(), chunkTask.StartBlockNumber) blockRange, err := cp.getBlockRangeByName(taskCtx.HardForkName)
if getBlockErr != nil { if err != nil {
return "", getBlockErr return nil, err
} }
hardForkName := forks.GetHardforkName(cp.chainCfg, l2Block.Number, l2Block.BlockTimestamp) return cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, nil)
return hardForkName, nil
} }
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask, hardForkName string) (*coordinatorType.GetTaskSchema, error) { func (cp *ChunkProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
var (
hardForkNames [2]string
blockRanges [2]*blockRange
err error
)
var blockRange *blockRange
for i := 0; i < 2; i++ {
hardForkNames[i] = cp.reverseVkMap[getTaskParameter.VKs[i]]
blockRanges[i], err = cp.getBlockRangeByName(hardForkNames[i])
if err == nil && blockRanges[i] != nil {
if blockRange == nil {
blockRange = blockRanges[i]
} else {
var err2 error
blockRange, err2 = blockRange.merge(*blockRanges[i])
if err2 != nil {
return nil, err2
}
}
}
}
if blockRange == nil {
log.Error("blockRange empty")
return nil, errors.New("blockRange empty")
}
var hardForkName string
getHardForkName := func(chunk *orm.Chunk) (string, error) {
for i := 0; i < 2; i++ {
if blockRanges[i] != nil && blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) {
hardForkName = hardForkNames[i]
break
}
}
if hardForkName == "" {
log.Warn("get chunk not belongs to any hard fork name", "chunk id", chunk.Index)
return "", fmt.Errorf("get chunk not belongs to any hard fork name, chunk id: %d", chunk.Index)
}
return hardForkName, nil
}
schema, err := cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, getHardForkName)
if schema != nil && err == nil {
schema.HardForkName = hardForkName
return schema, nil
}
return schema, err
}
type blockRange struct {
from uint64
to uint64
}
func (r *blockRange) merge(o blockRange) (*blockRange, error) {
if r.from == o.to {
return &blockRange{o.from, r.to}, nil
} else if r.to == o.from {
return &blockRange{r.from, o.to}, nil
}
return nil, fmt.Errorf("two ranges are not adjacent")
}
func (r *blockRange) contains(start, end uint64) bool {
return r.from <= start && r.to > end
}
func (cp *ChunkProverTask) getBlockRangeByName(hardForkName string) (*blockRange, error) {
hardForkNumber, err := cp.getHardForkNumberByName(hardForkName)
if err != nil {
// log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", hardForkName)
return nil, err
}
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
return &blockRange{fromBlockNum, toBlockNum}, nil
}
// Assign the chunk proof which need to prove
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
if err != nil || taskCtx == nil {
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
if len(getTaskParameter.VKs) > 0 {
return cp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
}
return cp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
}
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
// Get block hashes. // Get block hashes.
blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID) blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
if dbErr != nil || len(blockHashes) == 0 { if dbErr != nil || len(blockHashes) == 0 {
@@ -184,11 +283,10 @@ func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.Prove
} }
proverTaskSchema := &coordinatorType.GetTaskSchema{ proverTaskSchema := &coordinatorType.GetTaskSchema{
UUID: task.UUID.String(), UUID: task.UUID.String(),
TaskID: task.TaskID, TaskID: task.TaskID,
TaskType: int(message.ProofTypeChunk), TaskType: int(message.ProofTypeChunk),
TaskData: string(blockHashesBytes), TaskData: string(blockHashesBytes),
HardForkName: hardForkName,
} }
return proverTaskSchema, nil return proverTaskSchema, nil
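A minimal standalone sketch of the chunk-side blockRange helper shown above. Unlike the batch-side chunkIndexRange, merge only succeeds when the two fork windows are adjacent, so a combined query window can never contain a gap; the block numbers here are made-up examples.

package main

import "fmt"

// blockRange mirrors the chunk prover task helper: a half-open [from, to)
// window of L2 block numbers. merge requires adjacency so the merged window
// stays contiguous.
type blockRange struct {
	from, to uint64
}

func (r blockRange) merge(o blockRange) (blockRange, error) {
	if r.from == o.to {
		return blockRange{o.from, r.to}, nil
	}
	if r.to == o.from {
		return blockRange{r.from, o.to}, nil
	}
	return blockRange{}, fmt.Errorf("two ranges are not adjacent")
}

func (r blockRange) contains(start, end uint64) bool {
	return r.from <= start && r.to > end
}

func main() {
	low := blockRange{0, 1000}     // blocks before the fork height
	high := blockRange{1000, 5000} // blocks from the fork height onward

	merged, err := low.merge(high)
	fmt.Println(merged, err) // {0 5000} <nil>

	_, err = low.merge(blockRange{2000, 3000})
	fmt.Println(err) // two ranges are not adjacent

	fmt.Println(low.contains(10, 20)) // true: chunk [10, 20] belongs to the low fork
}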

View File

@@ -1,17 +1,17 @@
package provertask package provertask
import ( import (
"errors"
"fmt" "fmt"
"strings"
"sync" "sync"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config" "scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm" "scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types" coordinatorType "scroll-tech/coordinator/internal/types"
@@ -19,12 +19,9 @@ import (
var ( var (
// ErrCoordinatorInternalFailure coordinator internal db failure // ErrCoordinatorInternalFailure coordinator internal db failure
ErrCoordinatorInternalFailure = errors.New("coordinator internal error") ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
) // ErrHardForkName indicates client request with the wrong hard fork name
ErrHardForkName = fmt.Errorf("wrong hard fork name")
var (
getTaskCounterInitOnce sync.Once
getTaskCounterVec *prometheus.CounterVec = nil
) )
// ProverTask the interface of a collector who send data to prover // ProverTask the interface of a collector who send data to prover
@@ -32,15 +29,30 @@ type ProverTask interface {
Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
} }
func reverseMap(input map[string]string) map[string]string {
output := make(map[string]string, len(input))
for k, v := range input {
if k != "" {
output[v] = k
}
}
return output
}
// BaseProverTask a base prover task which contain series functions // BaseProverTask a base prover task which contain series functions
type BaseProverTask struct { type BaseProverTask struct {
cfg *config.Config cfg *config.Config
chainCfg *params.ChainConfig db *gorm.DB
db *gorm.DB
// key is hardForkName, value is vk
vkMap map[string]string
// key is vk, value is hardForkName
reverseVkMap map[string]string
nameForkMap map[string]uint64
forkHeights []uint64
batchOrm *orm.Batch batchOrm *orm.Batch
chunkOrm *orm.Chunk chunkOrm *orm.Chunk
bundleOrm *orm.Bundle
blockOrm *orm.L2Block blockOrm *orm.L2Block
proverTaskOrm *orm.ProverTask proverTaskOrm *orm.ProverTask
proverBlockListOrm *orm.ProverBlockList proverBlockListOrm *orm.ProverBlockList
@@ -50,39 +62,67 @@ type proverTaskContext struct {
PublicKey string PublicKey string
ProverName string ProverName string
ProverVersion string ProverVersion string
HardForkNames map[string]struct{} HardForkName string
} }
// checkParameter check the prover task parameter illegal // checkParameter check the prover task parameter illegal
func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, error) { func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*proverTaskContext, error) {
var ptc proverTaskContext var ptc proverTaskContext
ptc.HardForkNames = make(map[string]struct{})
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey) publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist { if !publicKeyExist {
return nil, errors.New("get public key from context failed") return nil, fmt.Errorf("get public key from context failed")
} }
ptc.PublicKey = publicKey.(string) ptc.PublicKey = publicKey.(string)
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName) proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist { if !proverNameExist {
return nil, errors.New("get prover name from context failed") return nil, fmt.Errorf("get prover name from context failed")
} }
ptc.ProverName = proverName.(string) ptc.ProverName = proverName.(string)
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion) proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist { if !proverVersionExist {
return nil, errors.New("get prover version from context failed") return nil, fmt.Errorf("get prover version from context failed")
} }
ptc.ProverVersion = proverVersion.(string) ptc.ProverVersion = proverVersion.(string)
hardForkNamesStr, hardForkNameExist := ctx.Get(coordinatorType.HardForkName) if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
if !hardForkNameExist { return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
return nil, errors.New("get hard fork name from context failed")
} }
hardForkNames := strings.Split(hardForkNamesStr.(string), ",")
for _, hardForkName := range hardForkNames { // signals that the prover is multi-circuits version
ptc.HardForkNames[hardForkName] = struct{}{} if len(getTaskParameter.VKs) > 0 {
if len(getTaskParameter.VKs) != 2 {
return nil, fmt.Errorf("parameter vks length must be 2")
}
for _, vk := range getTaskParameter.VKs {
if _, exists := b.reverseVkMap[vk]; !exists {
return nil, fmt.Errorf("incompatible vk. vk %s is invalid", vk)
}
}
} else {
hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, fmt.Errorf("get hard fork name from context failed")
}
ptc.HardForkName = hardForkName.(string)
vk, vkExist := b.vkMap[ptc.HardForkName]
if !vkExist {
return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
}
// if the prover has a different vk
if getTaskParameter.VK != vk {
log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
// if the prover reports a different prover version
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
// if the prover reports a same prover version
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
}
} }
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string)) isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
@@ -104,6 +144,26 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
return &ptc, nil return &ptc, nil
} }
func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error) {
// when the first hard fork upgrade, the prover don't pass the fork_name to coordinator.
// so coordinator need to be compatible.
if forkName == "" {
return 0, nil
}
hardForkNumber, exist := b.nameForkMap[forkName]
if !exist {
return 0, ErrHardForkName
}
return hardForkNumber, nil
}
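A minimal standalone sketch of reverseMap and of the multi-circuit VK validation from checkParameter above: exactly two VKs must be reported, and each must map back to a hard fork the coordinator knows. The vk strings and fork names below are made-up examples.

package main

import "fmt"

// reverseMap mirrors the helper above: flip a hardForkName -> vk map into a
// vk -> hardForkName map so a multi-circuit prover's reported VKs can be
// resolved back to fork names.
func reverseMap(input map[string]string) map[string]string {
	output := make(map[string]string, len(input))
	for k, v := range input {
		if k != "" {
			output[v] = k
		}
	}
	return output
}

// validateVKs is a simplified version of the multi-circuit branch in
// checkParameter: exactly two VKs are expected and both must be known.
func validateVKs(reverse map[string]string, vks []string) error {
	if len(vks) != 2 {
		return fmt.Errorf("parameter vks length must be 2")
	}
	for _, vk := range vks {
		if _, ok := reverse[vk]; !ok {
			return fmt.Errorf("incompatible vk. vk %s is invalid", vk)
		}
	}
	return nil
}

func main() {
	vkMap := map[string]string{"bernoulli": "vk-low", "curie": "vk-high"}
	reverse := reverseMap(vkMap)
	fmt.Println(validateVKs(reverse, []string{"vk-low", "vk-high"})) // <nil>
	fmt.Println(validateVKs(reverse, []string{"vk-low", "vk-bad"}))  // incompatible vk
	fmt.Println(reverse["vk-high"])                                  // curie
}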
var (
getTaskCounterInitOnce sync.Once
getTaskCounterVec *prometheus.CounterVec = nil
)
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec { func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
getTaskCounterInitOnce.Do(func() { getTaskCounterInitOnce.Do(func() {
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{ getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{

View File

@@ -4,6 +4,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt"
"strings" "strings"
"time" "time"
@@ -11,10 +12,8 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/forks"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message" "scroll-tech/common/types/message"
@@ -36,26 +35,21 @@ var (
// ErrValidatorFailureTaskHaveVerifiedSuccess have proved success and verified success // ErrValidatorFailureTaskHaveVerifiedSuccess have proved success and verified success
ErrValidatorFailureTaskHaveVerifiedSuccess = errors.New("validator failure chunk/batch have proved and verified success") ErrValidatorFailureTaskHaveVerifiedSuccess = errors.New("validator failure chunk/batch have proved and verified success")
// ErrValidatorFailureVerifiedFailed failed to verify and the verifier returns error // ErrValidatorFailureVerifiedFailed failed to verify and the verifier returns error
ErrValidatorFailureVerifiedFailed = errors.New("verification failed, verifier returns error") ErrValidatorFailureVerifiedFailed = fmt.Errorf("verification failed, verifier returns error")
// ErrValidatorSuccessInvalidProof successful verified and the proof is invalid // ErrValidatorSuccessInvalidProof successful verified and the proof is invalid
ErrValidatorSuccessInvalidProof = errors.New("verification succeeded, it's an invalid proof") ErrValidatorSuccessInvalidProof = fmt.Errorf("verification succeeded, it's an invalid proof")
// ErrGetHardForkNameFailed failed to get hard fork name
ErrGetHardForkNameFailed = errors.New("failed to get hard fork name")
// ErrCoordinatorInternalFailure coordinator internal db failure // ErrCoordinatorInternalFailure coordinator internal db failure
ErrCoordinatorInternalFailure = errors.New("coordinator internal error") ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
) )
// ProofReceiverLogic the proof receiver logic // ProofReceiverLogic the proof receiver logic
type ProofReceiverLogic struct { type ProofReceiverLogic struct {
chunkOrm *orm.Chunk chunkOrm *orm.Chunk
batchOrm *orm.Batch batchOrm *orm.Batch
bundleOrm *orm.Bundle
blockOrm *orm.L2Block
proverTaskOrm *orm.ProverTask proverTaskOrm *orm.ProverTask
db *gorm.DB db *gorm.DB
cfg *config.ProverManager cfg *config.ProverManager
chainCfg *params.ChainConfig
verifier *verifier.Verifier verifier *verifier.Verifier
@@ -72,17 +66,14 @@ type ProofReceiverLogic struct {
} }
// NewSubmitProofReceiverLogic create a proof receiver logic // NewSubmitProofReceiverLogic create a proof receiver logic
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic { func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *ProofReceiverLogic {
return &ProofReceiverLogic{ return &ProofReceiverLogic{
chunkOrm: orm.NewChunk(db), chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db), batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db), proverTaskOrm: orm.NewProverTask(db),
cfg: cfg, cfg: cfg,
chainCfg: chainCfg, db: db,
db: db,
verifier: vf, verifier: vf,
@@ -133,30 +124,47 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, chainCfg *params.Cha
// HandleZkProof handle a ZkProof submitted from a prover. // HandleZkProof handle a ZkProof submitted from a prover.
// For now only proving/verifying error will lead to setting status as skipped. // For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side. // db/unmarshal errors will not because they are errors on the business logic side.
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coordinatorType.SubmitProofParameter) error { func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) error {
m.proofReceivedTotal.Inc() m.proofReceivedTotal.Inc()
pk := ctx.GetString(coordinatorType.PublicKey) pk := ctx.GetString(coordinatorType.PublicKey)
if len(pk) == 0 { if len(pk) == 0 {
return errors.New("get public key from context failed") return fmt.Errorf("get public key from context failed")
} }
pv := ctx.GetString(coordinatorType.ProverVersion) pv := ctx.GetString(coordinatorType.ProverVersion)
if len(pv) == 0 { if len(pv) == 0 {
return errors.New("get ProverVersion from context failed") return fmt.Errorf("get ProverVersion from context failed")
}
// use hard_fork_name from parameter first
// if prover support multi hard_forks, the real hard_fork_name is not set to the gin context
hardForkName := proofParameter.HardForkName
if hardForkName == "" {
hardForkName = ctx.GetString(coordinatorType.HardForkName)
} }
proverTask, err := m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk) var proverTask *orm.ProverTask
if proverTask == nil || err != nil { var err error
log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofParameter.TaskID, "error", err) if proofParameter.UUID != "" {
return ErrValidatorFailureProverTaskEmpty proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
} else {
// TODO When prover all have upgrade, need delete this logic
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx.Copy(), proofMsg.Type, proofMsg.ID, pk, pv)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
} }
proofTime := time.Since(proverTask.CreatedAt) proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds()) proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proofID", proofParameter.TaskID, "proverName", proverTask.ProverName, log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec) "proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)
if err = m.validator(ctx.Copy(), proverTask, pk, proofParameter); err != nil { if err = m.validator(ctx.Copy(), proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
return err return err
} }
@@ -164,39 +172,18 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
success := true success := true
var verifyErr error var verifyErr error
hardForkName, getHardForkErr := m.hardForkName(ctx, proofParameter.TaskID, proofParameter.TaskType) // only verify batch proof. chunk proof verifier have been disabled after Bernoulli
if getHardForkErr != nil { if proofMsg.Type == message.ProofTypeBatch {
return ErrGetHardForkNameFailed success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName)
}
switch message.ProofType(proofParameter.TaskType) {
case message.ProofTypeChunk:
var chunkProof message.ChunkProof
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &chunkProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyChunkProof(&chunkProof, hardForkName)
case message.ProofTypeBatch:
var batchProof message.BatchProof
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &batchProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyBatchProof(&batchProof, hardForkName)
case message.ProofTypeBundle:
var bundleProof message.BundleProof
if unmarshalErr := json.Unmarshal([]byte(proofParameter.Proof), &bundleProof); unmarshalErr != nil {
return unmarshalErr
}
success, verifyErr = m.verifier.VerifyBundleProof(&bundleProof, hardForkName)
} }
if verifyErr != nil || !success { if verifyErr != nil || !success {
m.verifierFailureTotal.WithLabelValues(pv).Inc() m.verifierFailureTotal.WithLabelValues(pv).Inc()
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofParameter) m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
log.Info("proof verified by coordinator failed", "proof id", proofParameter.TaskID, "prover name", proverTask.ProverName, log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofParameter.TaskType, "proof time", proofTimeSec, "error", verifyErr) "prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
if verifyErr != nil { if verifyErr != nil {
return ErrValidatorFailureVerifiedFailed return ErrValidatorFailureVerifiedFailed
@@ -206,13 +193,13 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofParameter coor
m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds()) m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())
log.Info("proof verified and valid", "proof id", proofParameter.TaskID, "prover name", proverTask.ProverName, log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofParameter.TaskType, "proof time", proofTimeSec) "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)
if err := m.closeProofTask(ctx.Copy(), proverTask, proofParameter, proofTimeSec); err != nil { if err := m.closeProofTask(ctx.Copy(), proverTask, proofMsg, proofTimeSec); err != nil {
m.proofSubmitFailure.Inc() m.proofSubmitFailure.Inc()
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofParameter) m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
return ErrCoordinatorInternalFailure return ErrCoordinatorInternalFailure
} }
@@ -225,6 +212,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
if err != nil { if err != nil {
return err return err
} }
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batch.BatchHash) allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batch.BatchHash)
if err != nil { if err != nil {
return err return err
@@ -238,7 +226,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
return nil return nil
} }
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofParameter coordinatorType.SubmitProofParameter) (err error) { func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) {
defer func() { defer func() {
if err != nil { if err != nil {
m.validateFailureTotal.Inc() m.validateFailureTotal.Inc()
@@ -255,9 +243,9 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
// (ii) set the maximum failure retry times // (ii) set the maximum failure retry times
log.Warn( log.Warn(
"cannot submit valid proof for a prover task twice", "cannot submit valid proof for a prover task twice",
"taskType", proverTask.TaskType, "hash", proofParameter.TaskID, "taskType", proverTask.TaskType, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion, "proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", proverTask.ProverPublicKey, "proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName,
) )
return ErrValidatorFailureProverTaskCannotSubmitTwice return ErrValidatorFailureProverTaskCannotSubmitTwice
} }
@@ -265,60 +253,59 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
proofTime := time.Since(proverTask.CreatedAt) proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds()) proofTimeSec := uint64(proofTime.Seconds())
if proofParameter.Status != int(message.StatusOk) { if proofMsg.Status != message.StatusOk {
// Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs. // Temporarily replace "panic" with "pa-nic" to prevent triggering the alert based on logs.
failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1) failureMsg := strings.Replace(proofParameter.FailureMsg, "panic", "pa-nic", -1)
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeSubmitStatusNotOk, proofParameter) m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeSubmitStatusNotOk, proofMsg)
m.validateFailureProverTaskStatusNotOk.Inc() m.validateFailureProverTaskStatusNotOk.Inc()
log.Info("proof generated by prover failed", log.Info("proof generated by prover failed",
"taskType", proofParameter.TaskType, "hash", proofParameter.TaskID, "proverName", proverTask.ProverName, "taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType, "proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
"failureMessage", failureMsg) "failureMessage", failureMsg, "forkName", forkName)
return ErrValidatorFailureProofMsgStatusNotOk return ErrValidatorFailureProofMsgStatusNotOk
} }
// if the prover task FailureType is SessionInfoFailureTimeout, the submitted proof has timed out and must be skipped // if the prover task FailureType is SessionInfoFailureTimeout, the submitted proof has timed out and must be skipped
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout { if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc() m.validateFailureProverTaskTimeout.Inc()
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofParameter.TaskID, "taskType", proverTask.TaskType, log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec) "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName)
return ErrValidatorFailureProofTimeout return ErrValidatorFailureProofTimeout
} }
// store the proof to prover task // store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofParameter); updateTaskProofErr != nil { if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofParameter.TaskID, "proverPublicKey", pk, log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr) "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
} }
// if the batch/chunk has already been proven and verified successfully, skip this submitted proof // if the batch/chunk has already been proven and verified successfully, skip this submitted proof
if m.checkIsTaskSuccess(ctx, proofParameter.TaskID, message.ProofType(proofParameter.TaskType)) { if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeObjectAlreadyVerified, proofParameter)
m.validateFailureProverTaskHaveVerifier.Inc() m.validateFailureProverTaskHaveVerifier.Inc()
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofParameter.TaskID, log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk) "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName)
return ErrValidatorFailureTaskHaveVerifiedSuccess return ErrValidatorFailureTaskHaveVerifiedSuccess
} }
return nil return nil
} }
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, failureType types.ProverTaskFailureType, proofParameter coordinatorType.SubmitProofParameter) { func (m *ProofReceiverLogic) proofRecover(ctx context.Context, proverTask *orm.ProverTask, failureType types.ProverTaskFailureType, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, log.Info("proof recover update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskUnassigned.String()) "taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskUnassigned.String())
if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofInvalid, failureType, 0); err != nil { if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProverProofInvalid, failureType, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", proverTask.TaskID, "pubKey", proverTask.ProverPublicKey, "error", err) log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", proverTask.TaskID, "pubKey", proverTask.ProverPublicKey, "error", err)
} }
} }
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm.ProverTask, proofParameter coordinatorType.SubmitProofParameter, proofTimeSec uint64) error { func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, log.Info("proof close task update proof status", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey,
"taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String()) "taskType", message.ProofType(proverTask.TaskType).String(), "status", types.ProvingTaskVerified.String())
if err := m.updateProofStatus(ctx, proverTask, proofParameter, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil { if err := m.updateProofStatus(ctx, proverTask, proofMsg, types.ProverProofValid, types.ProverTaskFailureTypeUndefined, proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err) log.Error("failed to updated proof status ProvingTaskVerified", "hash", proverTask.TaskID, "proverPublicKey", proverTask.ProverPublicKey, "error", err)
return err return err
} }
@@ -327,14 +314,14 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, proverTask *orm
// updateProofStatus updates the chunk/batch task and session info status // updateProofStatus updates the chunk/batch task and session info status
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *orm.ProverTask, func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *orm.ProverTask,
proofParameter coordinatorType.SubmitProofParameter, status types.ProverProveStatus, failureType types.ProverTaskFailureType, proofTimeSec uint64) error { proofMsg *message.ProofMsg, status types.ProverProveStatus, failureType types.ProverTaskFailureType, proofTimeSec uint64) error {
err := m.db.Transaction(func(tx *gorm.DB) error { err := m.db.Transaction(func(tx *gorm.DB) error {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatusAndFailureType(ctx, proverTask.UUID, status, failureType, tx); updateErr != nil { if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatusAndFailureType(ctx, proverTask.UUID, status, failureType, tx); updateErr != nil {
log.Error("failed to update prover task proving status and failure type", "uuid", proverTask.UUID, "error", updateErr) log.Error("failed to update prover task proving status and failure type", "uuid", proverTask.UUID, "error", updateErr)
return updateErr return updateErr
} }
switch message.ProofType(proofParameter.TaskType) { switch proofMsg.Type {
case message.ProofTypeChunk: case message.ProofTypeChunk:
if err := m.chunkOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil { if err := m.chunkOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "hash", proverTask.TaskID, "error", err) log.Error("failed to update chunk proving_status as failed", "hash", proverTask.TaskID, "error", err)
@@ -345,28 +332,21 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *
log.Error("failed to update batch proving_status as failed", "hash", proverTask.TaskID, "error", err) log.Error("failed to update batch proving_status as failed", "hash", proverTask.TaskID, "error", err)
return err return err
} }
case message.ProofTypeBundle:
if err := m.bundleOrm.DecreaseActiveAttemptsByHash(ctx, proverTask.TaskID, tx); err != nil {
log.Error("failed to update bundle proving_status as failed", "hash", proverTask.TaskID, "error", err)
return err
}
} }
// if the chunk/batch already has a verified proof, do not let a failure update its proving status // if the chunk/batch already has a verified proof, do not let a failure update its proving status
if m.checkIsTaskSuccess(ctx, proverTask.TaskID, message.ProofType(proofParameter.TaskType)) { if m.checkIsTaskSuccess(ctx, proverTask.TaskID, proofMsg.Type) {
log.Info("update proof status skip because this chunk/batch has been verified", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey) log.Info("update proof status skip because this chunk/batch has been verified", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey)
return nil return nil
} }
if status == types.ProverProofValid { if status == types.ProverProofValid {
var storeProofErr error var storeProofErr error
switch message.ProofType(proofParameter.TaskType) { switch proofMsg.Type {
case message.ProofTypeChunk: case message.ProofTypeChunk:
storeProofErr = m.chunkOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx) storeProofErr = m.chunkOrm.UpdateProofAndProvingStatusByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, types.ProvingTaskVerified, proofTimeSec, tx)
case message.ProofTypeBatch: case message.ProofTypeBatch:
storeProofErr = m.batchOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx) storeProofErr = m.batchOrm.UpdateProofAndProvingStatusByHash(ctx, proofMsg.ID, proofMsg.BatchProof, types.ProvingTaskVerified, proofTimeSec, tx)
case message.ProofTypeBundle:
storeProofErr = m.bundleOrm.UpdateProofAndProvingStatusByHash(ctx, proofParameter.TaskID, []byte(proofParameter.Proof), types.ProvingTaskVerified, proofTimeSec, tx)
} }
if storeProofErr != nil { if storeProofErr != nil {
log.Error("failed to store chunk/batch proof and proving status", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey, "error", storeProofErr) log.Error("failed to store chunk/batch proof and proving status", "hash", proverTask.TaskID, "public key", proverTask.ProverPublicKey, "error", storeProofErr)
@@ -380,7 +360,7 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, proverTask *
return err return err
} }
if status == types.ProverProofValid && message.ProofType(proofParameter.TaskType) == message.ProofTypeChunk { if status == types.ProverProofValid && proofMsg.Type == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil { if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, proverTask.TaskID); checkReadyErr != nil {
log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr) log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
return checkReadyErr return checkReadyErr
@@ -405,63 +385,24 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string
if err != nil { if err != nil {
return false return false
} }
case message.ProofTypeBundle:
provingStatus, err = m.bundleOrm.GetProvingStatusByHash(ctx, hash)
if err != nil {
return false
}
} }
return provingStatus == types.ProvingTaskVerified return provingStatus == types.ProvingTaskVerified
} }
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, proverTask *orm.ProverTask, proofParameter coordinatorType.SubmitProofParameter) error { func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, proverTask *orm.ProverTask, proofMsg *message.ProofMsg) error {
return m.proverTaskOrm.UpdateProverTaskProof(ctx, proverTask.UUID, []byte(proofParameter.Proof)) // store the proof to prover task
} var proofBytes []byte
var marshalErr error
func (m *ProofReceiverLogic) hardForkName(ctx *gin.Context, hash string, proofType int) (string, error) { switch proofMsg.Type {
var (
bundle *orm.Bundle
batch *orm.Batch
chunk *orm.Chunk
err error
)
switch message.ProofType(proofType) {
case message.ProofTypeChunk: case message.ProofTypeChunk:
chunk, err = m.chunkOrm.GetChunkByHash(ctx, hash) proofBytes, marshalErr = json.Marshal(proofMsg.ChunkProof)
case message.ProofTypeBatch: case message.ProofTypeBatch:
batch, err = m.batchOrm.GetBatchByHash(ctx, hash) proofBytes, marshalErr = json.Marshal(proofMsg.BatchProof)
case message.ProofTypeBundle:
bundle, err = m.bundleOrm.GetBundleByHash(ctx, hash)
} }
if err != nil { if len(proofBytes) == 0 || marshalErr != nil {
return "", err return fmt.Errorf("updateProverTaskProof marshal proof error:%w", marshalErr)
} }
return m.proverTaskOrm.UpdateProverTaskProof(ctx, proverTask.UUID, proofBytes)
if bundle != nil {
batch, err = m.batchOrm.GetBatchByHash(ctx, bundle.StartBatchHash)
if err != nil {
return "", err
}
}
if batch != nil {
chunk, err = m.chunkOrm.GetChunkByHash(ctx, batch.StartChunkHash)
if err != nil {
return "", err
}
}
if chunk == nil {
return "", errors.New("failed to find chunk")
}
l2Block, getBlockErr := m.blockOrm.GetL2BlockByNumber(ctx.Copy(), chunk.StartBlockNumber)
if getBlockErr != nil {
return "", getBlockErr
}
hardForkName := forks.GetHardforkName(m.chainCfg, l2Block.Number, l2Block.BlockTimestamp)
return hardForkName, nil
} }
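
For orientation, a minimal sketch of the proof-type dispatch shown on the left-hand side above: the raw proof payload is unmarshalled according to the task type and handed to the matching verifier method. The proofVerifier interface and the verifyByTaskType helper are illustrative stand-ins, not identifiers from the repository.

package example

import (
	"encoding/json"
	"fmt"

	"scroll-tech/common/types/message"
)

// proofVerifier is a hypothetical narrow interface standing in for the coordinator's verifier.
type proofVerifier interface {
	VerifyChunkProof(proof *message.ChunkProof, forkName string) (bool, error)
	VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error)
	VerifyBundleProof(proof *message.BundleProof, forkName string) (bool, error)
}

// verifyByTaskType unmarshals the raw proof according to the task type and
// calls the matching verifier method, mirroring the switch on the left-hand side above.
func verifyByTaskType(v proofVerifier, taskType int, rawProof []byte, hardForkName string) (bool, error) {
	switch message.ProofType(taskType) {
	case message.ProofTypeChunk:
		var p message.ChunkProof
		if err := json.Unmarshal(rawProof, &p); err != nil {
			return false, err
		}
		return v.VerifyChunkProof(&p, hardForkName)
	case message.ProofTypeBatch:
		var p message.BatchProof
		if err := json.Unmarshal(rawProof, &p); err != nil {
			return false, err
		}
		return v.VerifyBatchProof(&p, hardForkName)
	case message.ProofTypeBundle:
		var p message.BundleProof
		if err := json.Unmarshal(rawProof, &p); err != nil {
			return false, err
		}
		return v.VerifyBundleProof(&p, hardForkName)
	default:
		return false, fmt.Errorf("unknown proof type: %d", taskType)
	}
}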

View File

@@ -10,13 +10,29 @@ import (
// NewVerifier sets up a mock verifier. // NewVerifier sets up a mock verifier.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) { func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
batchVKMap := map[string]struct{}{"mock_vk": {}} batchVKMap := map[string]string{
chunkVKMap := map[string]struct{}{"mock_vk": {}} "shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
chunkVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
} }
// VerifyChunkProof returns a mock verification result for a ChunkProof. // VerifyChunkProof returns a mock verification result for a ChunkProof.
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string) (bool, error) { func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
if string(proof.Proof) == InvalidTestProof { if string(proof.Proof) == InvalidTestProof {
return false, nil return false, nil
} }
@@ -30,11 +46,3 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string)
} }
return true, nil return true, nil
} }
// VerifyBundleProof returns a mock verification result for a BundleProof.
func (v *Verifier) VerifyBundleProof(proof *message.BundleProof, forkName string) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}
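
The mock NewVerifier above keys its verifying-key maps by hard-fork name and registers an empty key for each known fork plus the configured one. A small self-contained sketch of that pattern, assuming only what the mock shows; buildMockVKMaps and forkHasVK are illustrative helpers, not repository code.

package example

// buildMockVKMaps mirrors the mock verifier: every listed fork gets an empty
// verifying key, plus whatever fork name the config carries.
func buildMockVKMaps(configuredFork string) (batchVKMap, chunkVKMap map[string]string) {
	forks := []string{"homestead", "eip155", "istanbul", "london", "shanghai", "bernoulli"}
	batchVKMap = make(map[string]string, len(forks)+1)
	chunkVKMap = make(map[string]string, len(forks)+1)
	for _, fork := range forks {
		batchVKMap[fork] = ""
		chunkVKMap[fork] = ""
	}
	batchVKMap[configuredFork] = ""
	chunkVKMap[configuredFork] = ""
	return batchVKMap, chunkVKMap
}

// forkHasVK reports whether a fork has any verifying key registered; in the
// mock an empty string still counts as a valid entry.
func forkHasVK(vkMap map[string]string, forkName string) bool {
	_, ok := vkMap[forkName]
	return ok
}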

View File

@@ -9,8 +9,7 @@ const InvalidTestProof = "this is a invalid proof"
// Verifier represents a rust ffi to a halo2 verifier. // Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct { type Verifier struct {
cfg *config.VerifierConfig cfg *config.VerifierConfig
ChunkVKMap map[string]struct{} ChunkVKMap map[string]string
BatchVKMap map[string]struct{} BatchVKMap map[string]string
BundleVkMap map[string]struct{}
} }

View File

@@ -11,101 +11,76 @@ package verifier
import "C" //nolint:typecheck import "C" //nolint:typecheck
import ( import (
"embed"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"io" "io"
"io/fs"
"os" "os"
"path" "path"
"unsafe" "unsafe"
"github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config" "scroll-tech/coordinator/internal/config"
"scroll-tech/common/types/message"
) )
// This struct maps to `CircuitConfig` in common/libzkp/impl/src/verifier.rs
// Defining a brand new struct here eliminates side effects in case fields
// in `*config.CircuitConfig` are changed
type rustCircuitConfig struct {
ForkName string `json:"fork_name"`
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`
}
func newRustCircuitConfig(cfg *config.CircuitConfig) *rustCircuitConfig {
return &rustCircuitConfig{
ForkName: cfg.ForkName,
ParamsPath: cfg.ParamsPath,
AssetsPath: cfg.AssetsPath,
}
}
// This struct maps to `VerifierConfig` in common/libzkp/impl/src/verifier.rs
// Defining a brand new struct here eliminates side effects in case fields
// in `*config.VerifierConfig` are changed
type rustVerifierConfig struct {
LowVersionCircuit *rustCircuitConfig `json:"low_version_circuit"`
HighVersionCircuit *rustCircuitConfig `json:"high_version_circuit"`
}
func newRustVerifierConfig(cfg *config.VerifierConfig) *rustVerifierConfig {
return &rustVerifierConfig{
LowVersionCircuit: newRustCircuitConfig(cfg.LowVersionCircuit),
HighVersionCircuit: newRustCircuitConfig(cfg.HighVersionCircuit),
}
}
// NewVerifier sets up a Rust FFI to call verify. // NewVerifier sets up a Rust FFI to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) { func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode { if cfg.MockMode {
chunkVKMap := map[string]struct{}{"mock_vk": {}} batchVKMap := map[string]string{
batchVKMap := map[string]struct{}{"mock_vk": {}} "shanghai": "",
bundleVKMap := map[string]struct{}{"mock_vk": {}} "bernoulli": "",
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap, BundleVkMap: bundleVKMap}, nil "london": "",
} "istanbul": "",
verifierConfig := newRustVerifierConfig(cfg) "homestead": "",
configBytes, err := json.Marshal(verifierConfig) "eip155": "",
if err != nil { }
return nil, err chunkVKMap := map[string]string{
} "shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
configStr := C.CString(string(configBytes)) batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}
paramsPathStr := C.CString(cfg.ParamsPath)
assetsPathStr := C.CString(cfg.AssetsPath)
defer func() { defer func() {
C.free(unsafe.Pointer(configStr)) C.free(unsafe.Pointer(paramsPathStr))
C.free(unsafe.Pointer(assetsPathStr))
}() }()
C.init(configStr) C.init_batch_verifier(paramsPathStr, assetsPathStr)
C.init_chunk_verifier(paramsPathStr, assetsPathStr)
v := &Verifier{ v := &Verifier{
cfg: cfg, cfg: cfg,
ChunkVKMap: make(map[string]struct{}), ChunkVKMap: make(map[string]string),
BatchVKMap: make(map[string]struct{}), BatchVKMap: make(map[string]string),
BundleVkMap: make(map[string]struct{}),
} }
bundleVK, err := v.readVK(path.Join(cfg.HighVersionCircuit.AssetsPath, "vk_bundle.vkey")) batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
batchVK, err := v.readVK(path.Join(cfg.HighVersionCircuit.AssetsPath, "vk_batch.vkey")) chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
chunkVK, err := v.readVK(path.Join(cfg.HighVersionCircuit.AssetsPath, "vk_chunk.vkey")) v.BatchVKMap[cfg.ForkName] = batchVK
if err != nil { v.ChunkVKMap[cfg.ForkName] = chunkVK
if err := v.loadEmbedVK(); err != nil {
return nil, err return nil, err
} }
v.BundleVkMap[bundleVK] = struct{}{}
v.BatchVKMap[batchVK] = struct{}{}
v.ChunkVKMap[chunkVK] = struct{}{}
if err := v.loadLowVersionVKs(cfg); err != nil {
return nil, err
}
v.loadCurieVersionVKs()
return v, nil return v, nil
} }
@@ -137,34 +112,7 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string)
} }
// VerifyChunkProof verifies a ZkProof by marshaling it and sending it to the Halo2 Verifier. // VerifyChunkProof verifies a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof, forkName string) (bool, error) { func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled")
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
return true, nil
}
buf, err := json.Marshal(proof)
if err != nil {
return false, err
}
log.Info("Start to verify chunk proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
verified := C.verify_chunk_proof(proofStr, forkNameStr)
return verified != 0, nil
}
// VerifyBundleProof verifies a ZkProof for a bundle of batches, by marshaling it and verifying it via the EVM verifier.
func (v *Verifier) VerifyBundleProof(proof *message.BundleProof, forkName string) (bool, error) {
if v.cfg.MockMode { if v.cfg.MockMode {
log.Info("Mock mode, verifier disabled") log.Info("Mock mode, verifier disabled")
if string(proof.Proof) == InvalidTestProof { if string(proof.Proof) == InvalidTestProof {
@@ -179,14 +127,12 @@ func (v *Verifier) VerifyBundleProof(proof *message.BundleProof, forkName string
} }
proofStr := C.CString(string(buf)) proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() { defer func() {
C.free(unsafe.Pointer(proofStr)) C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}() }()
log.Info("Start to verify bundle proof ...") log.Info("Start to verify chunk proof ...")
verified := C.verify_bundle_proof(proofStr, forkNameStr) verified := C.verify_chunk_proof(proofStr)
return verified != 0, nil return verified != 0, nil
} }
@@ -202,27 +148,23 @@ func (v *Verifier) readVK(filePat string) (string, error) {
return base64.StdEncoding.EncodeToString(byt), nil return base64.StdEncoding.EncodeToString(byt), nil
} }
// load low version vks, current is darwin //go:embed legacy_vk/*
func (v *Verifier) loadLowVersionVKs(cfg *config.VerifierConfig) error { var legacyVKFS embed.FS
bundleVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_bundle.vkey"))
func (v *Verifier) loadEmbedVK() error {
batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
if err != nil { if err != nil {
log.Error("load embed batch vk failure", "err", err)
return err return err
} }
batchVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_batch.vkey"))
chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
if err != nil { if err != nil {
log.Error("load embed chunk vk failure", "err", err)
return err return err
} }
chunkVK, err := v.readVK(path.Join(cfg.LowVersionCircuit.AssetsPath, "vk_chunk.vkey"))
if err != nil { v.BatchVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(batchVKBytes)
return err v.ChunkVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
}
v.BundleVkMap[bundleVK] = struct{}{}
v.BatchVKMap[batchVK] = struct{}{}
v.ChunkVKMap[chunkVK] = struct{}{}
return nil return nil
} }
func (v *Verifier) loadCurieVersionVKs() {
v.BatchVKMap["AAAAGgAAAARX2S0K1wF333B1waOsnG/vcASJmWG9YM6SNWCBy1ywD9jfGkei+f0wNYpkjW7JO12EfU7CjYVBo+PGku3zaQJI64lbn6BwyTBa4RfrPFpV5mP47ix0sXZ+Wt5wklMLRW7OIJb1yfCDm+gkSsp3/Zqrxt4SY4rQ4WtHfynTCQ0KDi78jNuiFvwxO3ub3DkgGVaxMkGxTRP/Vz6E7MCZMUBR5wZFcMzJn+73f0wYjDxfj00krg9O1VrwVxbVV1ycLR6oQLcOgm/l+xwth8io0vDpF9OY21gD5DgJn9GgcYe8KoRVEbEqApLZPdBibpcSMTY9czZI2LnFcqrDDmYvhEwgjhZrsTog2xLXOODoOupZ/is5ekQ9Gi0y871b1mLlCGA="] = struct{}{}
v.ChunkVKMap["AAAAGQAAAATyWEABRbJ6hQQ5/zLX1gTasr7349minA9rSgMS6gDeHwZKqikRiO3md+pXjjxMHnKQtmXYgMXhJSvlmZ+Ws+cheuly2X1RuNQzcZuRImaKPR9LJsVZYsXfJbuqdKX8p0Gj8G83wMJOmTzNVUyUol0w0lTU+CEiTpHOnxBsTF3EWaW3s1u4ycOgWt1c9M6s7WmaBZLYgAWYCunO5CLCLApNGbCASeck/LuSoedEri5u6HccCKU2khG6zl6W07jvYSbDVLJktbjRiHv+/HQix+K14j8boo8Z/unhpwXCsPxkQA=="] = struct{}{}
}
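
The loadEmbedVK path above ships legacy verifying keys inside the binary via go:embed and stores them base64-encoded under a fork name. A condensed sketch of that pattern; the assets/ directory, file names, and helper names are illustrative assumptions, and only the embed/fs/base64 calls follow the code above.

package example

import (
	"embed"
	"encoding/base64"
	"io/fs"
)

// The embedded directory must exist at build time; "assets" is a placeholder here.
//go:embed assets/*
var embeddedVKs embed.FS

// loadEmbeddedVK reads one verifying key compiled into the binary and returns
// it base64-encoded, the same representation readVK produces for on-disk keys.
func loadEmbeddedVK(name string) (string, error) {
	raw, err := fs.ReadFile(embeddedVKs, "assets/"+name)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}

// registerLegacyVKs assigns the embedded keys to fork-keyed maps, as the
// verifier does for the "bernoulli" fork.
func registerLegacyVKs(batchVKMap, chunkVKMap map[string]string) error {
	batchVK, err := loadEmbeddedVK("agg_vk.vkey")
	if err != nil {
		return err
	}
	chunkVK, err := loadEmbeddedVK("chunk_vk.vkey")
	if err != nil {
		return err
	}
	batchVKMap["bernoulli"] = batchVK
	chunkVKMap["bernoulli"] = chunkVK
	return nil
}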

View File

@@ -18,8 +18,7 @@ import (
var ( var (
paramsPath = flag.String("params", "/assets/test_params", "params dir") paramsPath = flag.String("params", "/assets/test_params", "params dir")
assetsPathLo = flag.String("assets_lo", "/assets/test_assets_lo", "assets dir") assetsPath = flag.String("assets", "/assets/test_assets", "assets dir")
assetsPathHi = flag.String("assets", "/assets/test_assets", "assets dir")
batchProofPath = flag.String("batch_proof", "/assets/proof_data/batch_proof", "batch proof file path") batchProofPath = flag.String("batch_proof", "/assets/proof_data/batch_proof", "batch proof file path")
chunkProofPath1 = flag.String("chunk_proof1", "/assets/proof_data/chunk_proof1", "chunk proof file path 1") chunkProofPath1 = flag.String("chunk_proof1", "/assets/proof_data/chunk_proof1", "chunk proof file path 1")
chunkProofPath2 = flag.String("chunk_proof2", "/assets/proof_data/chunk_proof2", "chunk proof file path 2") chunkProofPath2 = flag.String("chunk_proof2", "/assets/proof_data/chunk_proof2", "chunk proof file path 2")
@@ -29,38 +28,28 @@ func TestFFI(t *testing.T) {
as := assert.New(t) as := assert.New(t)
cfg := &config.VerifierConfig{ cfg := &config.VerifierConfig{
MockMode: false, MockMode: false,
LowVersionCircuit: &config.CircuitConfig{ ParamsPath: *paramsPath,
ParamsPath: *paramsPath, AssetsPath: *assetsPath,
AssetsPath: *assetsPathLo,
ForkName: "darwin",
MinProverVersion: "",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: *paramsPath,
AssetsPath: *assetsPathHi,
ForkName: "darwinV2",
MinProverVersion: "",
},
} }
v, err := NewVerifier(cfg) v, err := NewVerifier(cfg)
as.NoError(err) as.NoError(err)
chunkProof1 := readChunkProof(*chunkProofPath1, as) chunkProof1 := readChunkProof(*chunkProofPath1, as)
chunkOk1, err := v.VerifyChunkProof(chunkProof1, "darwinV2") chunkOk1, err := v.VerifyChunkProof(chunkProof1)
as.NoError(err) as.NoError(err)
as.True(chunkOk1) as.True(chunkOk1)
t.Log("Verified chunk proof 1") t.Log("Verified chunk proof 1")
chunkProof2 := readChunkProof(*chunkProofPath2, as) chunkProof2 := readChunkProof(*chunkProofPath2, as)
chunkOk2, err := v.VerifyChunkProof(chunkProof2, "darwinV2") chunkOk2, err := v.VerifyChunkProof(chunkProof2)
as.NoError(err) as.NoError(err)
as.True(chunkOk2) as.True(chunkOk2)
t.Log("Verified chunk proof 2") t.Log("Verified chunk proof 2")
batchProof := readBatchProof(*batchProofPath, as) batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof, "darwinV2") batchOk, err := v.VerifyBatchProof(batchProof, "curie")
as.NoError(err) as.NoError(err)
as.True(batchOk) as.True(batchOk)
t.Log("Verified batch proof") t.Log("Verified batch proof")

View File

@@ -2,6 +2,7 @@ package orm
import ( import (
"context" "context"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"time" "time"
@@ -12,6 +13,7 @@ import (
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils" "scroll-tech/common/utils"
) )
@@ -31,9 +33,6 @@ type Batch struct {
WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"` WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"`
ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"` ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"` BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"`
CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"`
EnableCompress bool `json:"enable_compress" gorm:"column:enable_compress"`
BlobBytes []byte `json:"blob_bytes" gorm:"column:blob_bytes"`
// proof // proof
ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"` ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
@@ -60,9 +59,6 @@ type Batch struct {
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"` BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"` BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// bundle
BundleHash string `json:"bundle_hash" gorm:"column:bundle_hash"`
// metadata // metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"` CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"` UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -80,12 +76,12 @@ func (*Batch) TableName() string {
} }
// GetUnassignedBatch retrieves unassigned batch based on the specified limit. // GetUnassignedBatch retrieves unassigned batch based on the specified limit.
// The returned batches are sorted in ascending order by their index. // The returned batch is sorted in ascending order by its index.
func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) { func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
var batch Batch var batch Batch
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;", sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady)) int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
err := db.Raw(sql).Scan(&batch).Error err := db.Raw(sql).Scan(&batch).Error
if err != nil { if err != nil {
return nil, fmt.Errorf("Batch.GetUnassignedBatch error: %w", err) return nil, fmt.Errorf("Batch.GetUnassignedBatch error: %w", err)
@@ -97,12 +93,12 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTo
} }
// GetAssignedBatch retrieves assigned batch based on the specified limit. // GetAssignedBatch retrieves assigned batch based on the specified limit.
// The returned batches are sorted in ascending order by their index. // The returned batch is sorted in ascending order by its index.
func (o *Batch) GetAssignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) { func (o *Batch) GetAssignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
var batch Batch var batch Batch
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;", sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady)) int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
err := db.Raw(sql).Scan(&batch).Error err := db.Raw(sql).Scan(&batch).Error
if err != nil { if err != nil {
return nil, fmt.Errorf("Batch.GetAssignedBatch error: %w", err) return nil, fmt.Errorf("Batch.GetAssignedBatch error: %w", err)
@@ -188,59 +184,6 @@ func (o *Batch) GetAttemptsByHash(ctx context.Context, hash string) (int16, int1
return batch.ActiveAttempts, batch.TotalAttempts, nil return batch.ActiveAttempts, batch.TotalAttempts, nil
} }
// CheckIfBundleBatchProofsAreReady checks if all proofs for all batches of a given bundleHash are collected.
func (o *Batch) CheckIfBundleBatchProofsAreReady(ctx context.Context, bundleHash string) (bool, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("bundle_hash = ? AND proving_status != ?", bundleHash, types.ProvingTaskVerified)
var count int64
if err := db.Count(&count).Error; err != nil {
return false, fmt.Errorf("Chunk.CheckIfBundleBatchProofsAreReady error: %w, bundle hash: %v", err, bundleHash)
}
return count == 0, nil
}
// GetBatchByHash retrieves the given batch.
func (o *Batch) GetBatchByHash(ctx context.Context, hash string) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash = ?", hash)
var batch Batch
if err := db.First(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchByHash error: %w, batch hash: %v", err, hash)
}
return &batch, nil
}
// GetBatchesByBundleHash retrieves the given batch.
func (o *Batch) GetBatchesByBundleHash(ctx context.Context, bundleHash string) ([]*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("bundle_hash = ?", bundleHash)
db = db.Order("index ASC")
var batches []*Batch
if err := db.Find(&batches).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchesByBundleHash error: %w, bundle hash: %v", err, bundleHash)
}
return batches, nil
}
// GetBatchByIndex retrieves the batch by the given index.
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("index = ?", index)
var batch Batch
if err := db.First(&batch).Error; err != nil {
return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
}
return &batch, nil
}
// InsertBatch inserts a new batch into the database. // InsertBatch inserts a new batch into the database.
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) { func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) {
if batch == nil { if batch == nil {
@@ -374,14 +317,18 @@ func (o *Batch) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA
} }
// UpdateProofAndProvingStatusByHash updates the batch proof and proving status by hash. // UpdateProofAndProvingStatusByHash updates the batch proof and proving status by hash.
func (o *Batch) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error { func (o *Batch) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.BatchProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db db := o.db
if len(dbTX) > 0 && dbTX[0] != nil { if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0] db = dbTX[0]
} }
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
}
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["proof"] = proof updateFields["proof"] = proofBytes
updateFields["proving_status"] = provingStatus updateFields["proving_status"] = provingStatus
updateFields["proof_time_sec"] = proofTimeSec updateFields["proof_time_sec"] = proofTimeSec
updateFields["proved_at"] = utils.NowUTC() updateFields["proved_at"] = utils.NowUTC()

View File

@@ -1,228 +0,0 @@
package orm
import (
"context"
"errors"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/utils"
)
// Bundle represents a bundle of batches.
type Bundle struct {
db *gorm.DB `gorm:"column:-"`
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
StartBatchIndex uint64 `json:"start_batch_index" gorm:"column:start_batch_index"`
StartBatchHash string `json:"start_batch_hash" gorm:"column:start_batch_hash"`
EndBatchIndex uint64 `json:"end_batch_index" gorm:"column:end_batch_index"`
EndBatchHash string `json:"end_batch_hash" gorm:"column:end_batch_hash"`
// proof
BatchProofsStatus int16 `json:"batch_proofs_status" gorm:"column:batch_proofs_status;default:1"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`
TotalAttempts int16 `json:"total_attempts" gorm:"column:total_attempts;default:0"`
ActiveAttempts int16 `json:"active_attempts" gorm:"column:active_attempts;default:0"`
// rollup
RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"`
FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewBundle creates a new Bundle database instance.
func NewBundle(db *gorm.DB) *Bundle {
return &Bundle{db: db}
}
// TableName returns the table name for the Bundle model.
func (*Bundle) TableName() string {
return "bundle"
}
// GetUnassignedBundle retrieves unassigned bundle based on the specified limit.
// The returned bundle is sorted in ascending order by its index.
func (o *Bundle) GetUnassignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {
var bundle Bundle
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.BatchProofsStatusReady))
err := db.Raw(sql).Scan(&bundle).Error
if err != nil {
return nil, fmt.Errorf("Batch.GetUnassignedBundle error: %w", err)
}
if bundle.StartBatchHash == "" || bundle.EndBatchHash == "" {
return nil, nil
}
return &bundle, nil
}
// GetAssignedBundle retrieves assigned bundle based on the specified limit.
// The returned bundle is sorted in ascending order by its index.
func (o *Bundle) GetAssignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {
var bundle Bundle
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM bundle WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND batch_proofs_status = %d AND bundle.deleted_at IS NULL ORDER BY bundle.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.BatchProofsStatusReady))
err := db.Raw(sql).Scan(&bundle).Error
if err != nil {
return nil, fmt.Errorf("Bundle.GetAssignedBatch error: %w", err)
}
if bundle.StartBatchHash == "" || bundle.EndBatchHash == "" {
return nil, nil
}
return &bundle, nil
}
// GetProvingStatusByHash retrieves the proving status of a bundle given its hash.
func (o *Bundle) GetProvingStatusByHash(ctx context.Context, hash string) (types.ProvingStatus, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Select("proving_status")
db = db.Where("hash = ?", hash)
var bundle Bundle
if err := db.Find(&bundle).Error; err != nil {
return types.ProvingStatusUndefined, fmt.Errorf("Bundle.GetProvingStatusByHash error: %w, bundle hash: %v", err, hash)
}
return types.ProvingStatus(bundle.ProvingStatus), nil
}
// GetBundleByHash retrieves the bundle with the given hash.
func (o *Bundle) GetBundleByHash(ctx context.Context, bundleHash string) (*Bundle, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash = ?", bundleHash)
var bundle Bundle
if err := db.First(&bundle).Error; err != nil {
return nil, fmt.Errorf("Bundle.GetBundleByHash error: %w, bundle hash: %v", err, bundleHash)
}
return &bundle, nil
}
// GetUnassignedAndBatchesUnreadyBundles gets the bundles that are unassigned and whose batches are not yet ready
func (o *Bundle) GetUnassignedAndBatchesUnreadyBundles(ctx context.Context, offset, limit int) ([]*Bundle, error) {
if offset < 0 || limit < 0 {
return nil, errors.New("limit and offset must not be smaller than 0")
}
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
db = db.Where("batch_proofs_status = ?", types.BatchProofsStatusPending)
db = db.Order("index ASC")
db = db.Offset(offset)
db = db.Limit(limit)
var bundles []*Bundle
if err := db.Find(&bundles).Error; err != nil {
return nil, fmt.Errorf("Bundle.GetUnassignedAndBatchesUnreadyBundles error: %w", err)
}
return bundles, nil
}
// UpdateBatchProofsStatusByBundleHash updates the status of batch_proofs_status field for a given bundle hash.
func (o *Bundle) UpdateBatchProofsStatusByBundleHash(ctx context.Context, bundleHash string, status types.BatchProofsStatus) error {
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash = ?", bundleHash)
if err := db.Update("batch_proofs_status", status).Error; err != nil {
return fmt.Errorf("Bundle.UpdateBatchProofsStatusByBundleHash error: %w, bundle hash: %v, status: %v", err, bundleHash, status.String())
}
return nil
}
// UpdateProvingStatusFailed marks the proving status of a bundle as failed.
func (o *Bundle) UpdateProvingStatusFailed(ctx context.Context, bundleHash string, maxAttempts uint8, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash", bundleHash)
db = db.Where("total_attempts >= ?", maxAttempts)
db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
if err := db.Update("proving_status", int(types.ProvingTaskFailed)).Error; err != nil {
return fmt.Errorf("Bundle.UpdateProvingStatus error: %w, bundle hash: %v, status: %v", err, bundleHash, types.ProvingTaskFailed.String())
}
return nil
}
// UpdateProofAndProvingStatusByHash updates the bundle proof and proving status by hash.
func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{})
updateFields["proof"] = proof
updateFields["proving_status"] = provingStatus
updateFields["proof_time_sec"] = proofTimeSec
updateFields["proved_at"] = utils.NowUTC()
db = db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateProofByHash error: %w, batch hash: %v", err, hash)
}
return nil
}
// UpdateBundleAttempts atomically increments the attempts count for the earliest available bundle that meets the conditions.
func (o *Bundle) UpdateBundleAttempts(ctx context.Context, hash string, curActiveAttempts, curTotalAttempts int16) (int64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash = ?", hash)
db = db.Where("active_attempts = ?", curActiveAttempts)
db = db.Where("total_attempts = ?", curTotalAttempts)
result := db.Updates(map[string]interface{}{
"proving_status": types.ProvingTaskAssigned,
"total_attempts": gorm.Expr("total_attempts + 1"),
"active_attempts": gorm.Expr("active_attempts + 1"),
})
if result.Error != nil {
return 0, fmt.Errorf("failed to update bundle, err:%w", result.Error)
}
return result.RowsAffected, nil
}
// DecreaseActiveAttemptsByHash decrements the active_attempts of a bundle given its hash.
func (o *Bundle) DecreaseActiveAttemptsByHash(ctx context.Context, bundleHash string, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("hash = ?", bundleHash)
db = db.Where("proving_status != ?", int(types.ProvingTaskVerified))
db = db.Where("active_attempts > ?", 0)
result := db.UpdateColumn("active_attempts", gorm.Expr("active_attempts - 1"))
if result.Error != nil {
return fmt.Errorf("Bundle.DecreaseActiveAttemptsByHash error: %w, bundle hash: %v", result.Error, bundleHash)
}
if result.RowsAffected == 0 {
log.Warn("No rows were affected in DecreaseActiveAttemptsByHash", "bundle hash", bundleHash)
}
return nil
}
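
The deleted bundle ORM above guards its attempt counters with conditional updates and gorm.Expr so that concurrent coordinators cannot double-assign the same bundle. A minimal sketch of that compare-and-increment idiom; bumpAttempts is an illustrative name and the proving-status update is left out for brevity.

package example

import (
	"context"

	"gorm.io/gorm"
)

// bumpAttempts increments both attempt counters only if the row still holds
// the counter values the caller last observed; a zero RowsAffected result
// means another instance won the race.
func bumpAttempts(ctx context.Context, db *gorm.DB, hash string, curActiveAttempts, curTotalAttempts int16) (int64, error) {
	result := db.WithContext(ctx).
		Table("bundle").
		Where("hash = ?", hash).
		Where("active_attempts = ?", curActiveAttempts).
		Where("total_attempts = ?", curTotalAttempts).
		Updates(map[string]interface{}{
			"total_attempts":  gorm.Expr("total_attempts + 1"),
			"active_attempts": gorm.Expr("active_attempts + 1"),
		})
	return result.RowsAffected, result.Error
}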

View File

@@ -2,7 +2,6 @@ package orm
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"time" "time"
@@ -54,7 +53,7 @@ func (r *Challenge) InsertChallenge(ctx context.Context, challengeString string)
return fmt.Errorf("the challenge string:%s have been used", challengeString) return fmt.Errorf("the challenge string:%s have been used", challengeString)
} }
return errors.New("insert challenge string affected rows more than 1") return fmt.Errorf("insert challenge string affected rows more than 1")
} }
// DeleteExpireChallenge delete the expire challenge // DeleteExpireChallenge delete the expire challenge

View File

@@ -74,11 +74,11 @@ func (*Chunk) TableName() string {
// GetUnassignedChunk retrieves unassigned chunk based on the specified limit. // GetUnassignedChunk retrieves unassigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index. // The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (*Chunk, error) { func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
var chunk Chunk var chunk Chunk
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND end_block_number <= %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;", sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, height) int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
err := db.Raw(sql).Scan(&chunk).Error err := db.Raw(sql).Scan(&chunk).Error
if err != nil { if err != nil {
return nil, fmt.Errorf("Chunk.GetUnassignedChunk error: %w", err) return nil, fmt.Errorf("Chunk.GetUnassignedChunk error: %w", err)
@@ -91,11 +91,11 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTo
// GetAssignedChunk retrieves assigned chunk based on the specified limit. // GetAssignedChunk retrieves assigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index. // The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetAssignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (*Chunk, error) { func (o *Chunk) GetAssignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
var chunk Chunk var chunk Chunk
db := o.db.WithContext(ctx) db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND end_block_number <= %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;", sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, height) int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
err := db.Raw(sql).Scan(&chunk).Error err := db.Raw(sql).Scan(&chunk).Error
if err != nil { if err != nil {
return nil, fmt.Errorf("Chunk.GetAssignedChunk error: %w", err) return nil, fmt.Errorf("Chunk.GetAssignedChunk error: %w", err)
@@ -340,14 +340,18 @@ func (o *Chunk) UpdateProvingStatusFailed(ctx context.Context, hash string, maxA
} }
// UpdateProofAndProvingStatusByHash updates the chunk proof and proving_status by hash. // UpdateProofAndProvingStatusByHash updates the chunk proof and proving_status by hash.
func (o *Chunk) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof []byte, status types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error { func (o *Chunk) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.ChunkProof, status types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db db := o.db
if len(dbTX) > 0 && dbTX[0] != nil { if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0] db = dbTX[0]
} }
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
}
updateFields := make(map[string]interface{}) updateFields := make(map[string]interface{})
updateFields["proof"] = proof updateFields["proof"] = proofBytes
updateFields["proving_status"] = int(status) updateFields["proving_status"] = int(status)
updateFields["proof_time_sec"] = proofTimeSec updateFields["proof_time_sec"] = proofTimeSec
updateFields["proved_at"] = utils.NowUTC() updateFields["proved_at"] = utils.NowUTC()

View File

@@ -74,68 +74,6 @@ func (o *L2Block) GetL2BlockHashesByChunkHash(ctx context.Context, chunkHash str
return blockHashes, nil return blockHashes, nil
} }
// GetL2BlockByNumber retrieves the L2 block by l2 block number
func (o *L2Block) GetL2BlockByNumber(ctx context.Context, blockNumber uint64) (*L2Block, error) {
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Where("number = ?", blockNumber)
var l2Block L2Block
if err := db.First(&l2Block).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlockByNumber error: %w, chunk block number: %v", err, blockNumber)
}
return &l2Block, nil
}
// GetL2BlocksInRange retrieves the L2 blocks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end block numbers.
// The returned blocks are sorted in ascending order by their block number.
func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint64, endBlockNumber uint64) ([]*encoding.Block, error) {
if startBlockNumber > endBlockNumber {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: start block number should be less than or equal to end block number, start block: %v, end block: %v", startBlockNumber, endBlockNumber)
}
db := o.db.WithContext(ctx)
db = db.Model(&L2Block{})
db = db.Select("header, transactions, withdraw_root, row_consumption")
db = db.Where("number >= ? AND number <= ?", startBlockNumber, endBlockNumber)
db = db.Order("number ASC")
var l2Blocks []L2Block
if err := db.Find(&l2Blocks).Error; err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
// sanity check
if uint64(len(l2Blocks)) != endBlockNumber-startBlockNumber+1 {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange: unexpected number of results, expected: %v, got: %v", endBlockNumber-startBlockNumber+1, len(l2Blocks))
}
var blocks []*encoding.Block
for _, v := range l2Blocks {
var block encoding.Block
if err := json.Unmarshal([]byte(v.Transactions), &block.Transactions); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
block.Header = &gethTypes.Header{}
if err := json.Unmarshal([]byte(v.Header), block.Header); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
block.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
if err := json.Unmarshal([]byte(v.RowConsumption), &block.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}
blocks = append(blocks, &block)
}
return blocks, nil
}
// InsertL2Blocks inserts l2 blocks into the "l2_block" table. // InsertL2Blocks inserts l2 blocks into the "l2_block" table.
// for unit test // for unit test
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block) error { func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block) error {
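
The removed GetL2BlocksInRange reconstructs block objects from JSON columns: the header and transactions are unmarshalled and the withdraw root is parsed from hex. A trimmed sketch of just the per-row decoding, with a hypothetical l2BlockRow projection; the core/types import path of the scroll-tech go-ethereum fork is assumed.

package example

import (
	"encoding/json"

	"github.com/scroll-tech/go-ethereum/common"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
)

// l2BlockRow is a hypothetical projection of the l2_block columns used here.
type l2BlockRow struct {
	Header       string
	WithdrawRoot string
}

// decodedBlock holds the pieces recovered from one row.
type decodedBlock struct {
	Header       *gethTypes.Header
	WithdrawRoot common.Hash
}

// decodeL2BlockRow unmarshals the stored JSON header and parses the withdraw
// root, mirroring the loop body of the removed query.
func decodeL2BlockRow(row l2BlockRow) (*decodedBlock, error) {
	header := &gethTypes.Header{}
	if err := json.Unmarshal([]byte(row.Header), header); err != nil {
		return nil, err
	}
	return &decodedBlock{
		Header:       header,
		WithdrawRoot: common.HexToHash(row.WithdrawRoot),
	}, nil
}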

View File

@@ -116,6 +116,25 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType messag
return proverTasks, nil return proverTasks, nil
} }
// GetAssignedProverTaskByTaskIDAndProver gets the assigned prover task by task type, task ID, prover public key, and prover version
// TODO: deprecate this function once all provers have upgraded
func (o *ProverTask) GetAssignedProverTaskByTaskIDAndProver(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey, proverVersion string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", taskID)
db = db.Where("prover_public_key", proverPublicKey)
db = db.Where("prover_version", proverVersion)
db = db.Where("proving_status", types.ProverAssigned)
var proverTask ProverTask
err := db.First(&proverTask).Error
if err != nil {
return nil, fmt.Errorf("ProverTask.GetProverTaskByTaskIDAndProver err:%w, taskID:%s, pubkey:%s, prover_version:%s", err, taskID, proverPublicKey, proverVersion)
}
return &proverTask, nil
}
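A minimal sketch of how this lookup is typically used before accepting a submitted proof; the variable names are placeholders, not code from this PR.

// Sketch: find the task a prover is currently assigned.
// gorm's ErrRecordNotFound is wrapped into the returned error when nothing is assigned.
task, err := proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(
	ctx, message.ProofTypeChunk, taskID, proverPublicKey, proverVersion)
if err != nil {
	return fmt.Errorf("prover %s has no assigned task %s: %w", proverPublicKey, taskID, err)
}
_ = task // proceed to validate the submitted proof against this task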
// GetProverTaskByUUIDAndPublicKey gets the prover task by uuid and public key.
func (o *ProverTask) GetProverTaskByUUIDAndPublicKey(ctx context.Context, uuid, publicKey string) (*ProverTask, error) {
db := o.db.WithContext(ctx)

View File

@@ -1,15 +1,6 @@
package types
import ( import "time"
"crypto/ecdsa"
"encoding/hex"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
const (
// PublicKey the public key for context
@@ -18,96 +9,26 @@ const (
ProverName = "prover_name" ProverName = "prover_name"
// ProverVersion the prover version for context // ProverVersion the prover version for context
ProverVersion = "prover_version" ProverVersion = "prover_version"
// HardForkName the hard fork name for context // HardForkName the fork name for context
HardForkName = "hard_fork_name" HardForkName = "hard_fork_name"
) )
// Message the login message struct
type Message struct {
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
}
// LoginParameter for /login api
type LoginParameter struct {
Message Message `form:"message" json:"message" binding:"required"`
Signature string `form:"signature" json:"signature" binding:"required"`
}
// LoginSchema for /login response
type LoginSchema struct {
Time time.Time `json:"time"`
Token string `json:"token"`
}
// Message the login message struct
type Message struct {
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
ProverTypes []ProverType `form:"prover_types" json:"prover_types"`
VKs []string `form:"vks" json:"vks"`
}
// LoginParameterWithHardForkName constructs new payload for login
type LoginParameterWithHardForkName struct {
LoginParameter
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
}
// LoginParameter for /login api
type LoginParameter struct {
Message Message `form:"message" json:"message" binding:"required"`
PublicKey string `form:"public_key" json:"public_key"`
Signature string `form:"signature" json:"signature" binding:"required"`
}
// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *LoginParameter) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Message.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the message of auth.
func (a *LoginParameter) Verify() (bool, error) {
hash, err := a.Message.Hash()
if err != nil {
return false, err
}
expectedPubKey, err := a.Message.DecodeAndUnmarshalPubkey(a.PublicKey)
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
isValid := crypto.VerifySignature(crypto.CompressPubkey(expectedPubKey), hash, sig[:len(sig)-1])
return isValid, nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Message) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
// DecodeAndUnmarshalPubkey decodes a hex-encoded public key and unmarshal it into an ecdsa.PublicKey
func (i *Message) DecodeAndUnmarshalPubkey(pubKeyHex string) (*ecdsa.PublicKey, error) {
// Decode hex string to bytes
byteKey, err := hex.DecodeString(pubKeyHex)
if err != nil {
return nil, err
}
// Unmarshal bytes to ECDSA public key
pubKey, err := crypto.DecompressPubkey(byteKey)
if err != nil {
return nil, err
}
return pubKey, nil
}
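For orientation, the scheme implemented by the helpers above is: the Message struct is RLP-encoded, hashed with keccak256, and signed with the prover's secp256k1 key; Verify drops the final byte of the 65-byte signature because crypto.VerifySignature expects a 64-byte [R || S] value together with the compressed public key. A minimal round-trip sketch (the auth test in the next file exercises the same path):

// Sketch of the sign/verify round trip; values are placeholders.
priv, err := crypto.GenerateKey()
if err != nil {
	panic(err)
}
login := LoginParameter{
	Message: Message{
		Challenge:     "example-challenge",
		ProverName:    "example-prover",
		ProverVersion: "v4.4.0",
		ProverTypes:   []ProverType{ProverTypeChunk},
		VKs:           []string{"mock_vk"},
	},
	PublicKey: common.Bytes2Hex(crypto.CompressPubkey(&priv.PublicKey)),
}
if err := login.SignWithKey(priv); err != nil { // signs keccak256(rlp(Message)) with priv
	panic(err)
}
ok, err := login.Verify() // recomputes the hash and checks the signature against PublicKey
fmt.Println(ok, err)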

View File

@@ -1,77 +0,0 @@
package types
import (
"encoding/hex"
"testing"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
)
func TestAuthMessageSignAndVerify(t *testing.T) {
privateKey, err := crypto.GenerateKey()
assert.NoError(t, err)
publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&privateKey.PublicKey))
var authMsg LoginParameter
t.Run("sign", func(t *testing.T) {
authMsg = LoginParameter{
Message: Message{
ProverName: "test1",
ProverVersion: "v0.0.1",
Challenge: "abcdef",
ProverTypes: []ProverType{ProverTypeBatch},
VKs: []string{"vk1", "vk2"},
},
PublicKey: publicKeyHex,
}
err = authMsg.SignWithKey(privateKey)
assert.NoError(t, err)
})
t.Run("valid verify", func(t *testing.T) {
ok, verifyErr := authMsg.Verify()
assert.True(t, ok)
assert.NoError(t, verifyErr)
})
t.Run("invalid verify", func(t *testing.T) {
authMsg.Message.Challenge = "abcdefgh"
ok, verifyErr := authMsg.Verify()
assert.False(t, ok)
assert.NoError(t, verifyErr)
})
}
// TestGenerateSignature is not a real test; it only generates a signature for manual testing.
func TestGenerateSignature(t *testing.T) {
privateKeyHex := "8b8df68fddf7ee2724b79ccbd07799909d59b4dd4f4df3f6ecdc4fb8d56bdf4c"
privateKeyBytes, err := hex.DecodeString(privateKeyHex)
assert.Nil(t, err)
privateKey, err := crypto.ToECDSA(privateKeyBytes)
assert.NoError(t, err)
assert.NoError(t, err)
publicKeyHex := common.Bytes2Hex(crypto.CompressPubkey(&privateKey.PublicKey))
t.Log("publicKey: ", publicKeyHex)
authMsg := LoginParameter{
Message: Message{
ProverName: "test",
ProverVersion: "v4.4.45-37af5ef5-38a68e2-1c5093c",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
ProverTypes: []ProverType{ProverTypeChunk},
VKs: []string{"mock_vk"},
},
PublicKey: publicKeyHex,
}
err = authMsg.SignWithKey(privateKey)
assert.NoError(t, err)
t.Log("signature: ", authMsg.Signature)
verify, err := authMsg.Verify()
assert.NoError(t, err)
assert.True(t, verify)
}

View File

@@ -2,8 +2,10 @@ package types
// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskTypes []int `form:"task_types" json:"task_types"` TaskType int `form:"task_type" json:"task_type"`
VK string `form:"vk" json:"vk"` // deprecated: will be removed once all go_prover instances are offline
VKs []string `form:"vks" json:"vks"` // for rust_prover, which supports multiple circuits
}
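For illustration only (values are placeholders, not taken from this PR), a get_task request body matching the TaskType/VK/VKs variant of this struct would look roughly like:

// Placeholder request body for /coordinator/v1/get_task using the TaskType/VK/VKs variant.
body := map[string]interface{}{
	"prover_height": 100,
	"task_type":     int(message.ProofTypeChunk),
	"vks":           []string{"mock_vk"}, // only set by rust provers that support multiple circuits
}
_ = body // typically marshalled to JSON and POSTed with the login token in the Authorization header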
// GetTaskSchema the schema data returned to the prover for a prover task

View File

@@ -1,42 +0,0 @@
package types
import (
"fmt"
"scroll-tech/common/types/message"
)
// ProverType represents the type of prover.
type ProverType uint8
func (r ProverType) String() string {
switch r {
case ProverTypeChunk:
return "prover type chunk"
case ProverTypeBatch:
return "prover type batch"
default:
return fmt.Sprintf("illegal prover type: %d", r)
}
}
const (
// ProverTypeUndefined is an unknown prover type
ProverTypeUndefined ProverType = iota
// ProverTypeChunk signals it's a chunk prover, which can prove chunk_tasks
ProverTypeChunk
// ProverTypeBatch signals it's a batch prover, which can prove batch_tasks and bundle_tasks
ProverTypeBatch
)
// MakeProverType make ProverType from ProofType
func MakeProverType(proof_type message.ProofType) ProverType {
switch proof_type {
case message.ProofTypeChunk:
return ProverTypeChunk
case message.ProofTypeBatch, message.ProofTypeBundle:
return ProverTypeBatch
default:
return ProverTypeUndefined
}
}
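For context, the helper in the file above (which this diff removes) was consumed by callers roughly as in the sketch below; the surrounding function is hypothetical.

// Hypothetical caller of the removed helper: choose a prover type for an incoming proof type.
func proverTypeFor(proofType message.ProofType) (ProverType, error) {
	pt := MakeProverType(proofType) // ProofTypeBatch and ProofTypeBundle both map to ProverTypeBatch
	if pt == ProverTypeUndefined {
		return ProverTypeUndefined, fmt.Errorf("unsupported proof type: %d", proofType)
	}
	return pt, nil
}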

View File

@@ -2,11 +2,13 @@ package types
// SubmitProofParameter the SubmitProof api request parameter
type SubmitProofParameter struct {
// TODO: make this field required once all provers have upgraded
UUID string `form:"uuid" json:"uuid"`
TaskID string `form:"task_id" json:"task_id" binding:"required"`
TaskType int `form:"task_type" json:"task_type" binding:"required"`
Status int `form:"status" json:"status"`
Proof string `form:"proof" json:"proof"`
FailureType int `form:"failure_type" json:"failure_type"`
FailureMsg string `form:"failure_msg" json:"failure_msg"`
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
}
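A hedged example of a submit_proof payload under the HardForkName variant of this struct; every value below is a placeholder.

// Placeholder payload for /coordinator/v1/submit_proof; field values are illustrative only.
param := SubmitProofParameter{
	UUID:         "c0ffee-uuid", // optional until all provers are upgraded
	TaskID:       "task-hash",
	TaskType:     int(message.ProofTypeChunk),
	Status:       int(message.StatusOk),
	Proof:        `{"proof":"...","instances":"..."}`, // JSON-encoded ChunkProof or BatchProof
	HardForkName: "bernoulli",
}
_ = param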

View File

@@ -33,6 +33,13 @@ import (
"scroll-tech/coordinator/internal/route" "scroll-tech/coordinator/internal/route"
) )
const (
forkNumberFour = 4
forkNumberThree = 3
forkNumberTwo = 2
forkNumberOne = 1
)
var ( var (
conf *config.Config conf *config.Config
@@ -45,10 +52,17 @@ var (
proverTaskOrm *orm.ProverTask proverTaskOrm *orm.ProverTask
proverBlockListOrm *orm.ProverBlockList proverBlockListOrm *orm.ProverBlockList
block1 *encoding.Block block1 *encoding.Block
block2 *encoding.Block block2 *encoding.Block
chunk *encoding.Chunk
batch *encoding.Batch chunk *encoding.Chunk
hardForkChunk1 *encoding.Chunk
hardForkChunk2 *encoding.Chunk
batch *encoding.Batch
hardForkBatch1 *encoding.Batch
hardForkBatch2 *encoding.Batch
tokenTimeout int tokenTimeout int
) )
@@ -66,7 +80,7 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64()) return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
} }
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, forks []string) (*cron.Collector, *http.Server) { func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, nameForkMap map[string]int64) (*cron.Collector, *http.Server) {
var err error var err error
db, err = testApps.GetGormDBClient() db, err = testApps.GetGormDBClient()
@@ -75,7 +89,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
assert.NoError(t, err) assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB)) assert.NoError(t, migrate.ResetDB(sqlDB))
tokenTimeout = 60 tokenTimeout = 6
conf = &config.Config{ conf = &config.Config{
L2: &config.L2{ L2: &config.L2{
ChainID: 111, ChainID: 111,
@@ -84,23 +98,12 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
ProversPerSession: proversPerSession, ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{ Verifier: &config.VerifierConfig{
MockMode: true, MockMode: true,
LowVersionCircuit: &config.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "homestead",
MinProverVersion: "v4.2.0",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "bernoulli",
MinProverVersion: "v4.3.0",
},
}, },
BatchCollectionTimeSec: 10, BatchCollectionTimeSec: 10,
ChunkCollectionTimeSec: 10, ChunkCollectionTimeSec: 10,
BundleCollectionTimeSec: 10, MaxVerifierWorkers: 10,
SessionAttempts: 5, SessionAttempts: 5,
MinProverVersion: version.Version,
}, },
Auth: &config.Auth{ Auth: &config.Auth{
ChallengeExpireDurationSec: tokenTimeout, ChallengeExpireDurationSec: tokenTimeout,
@@ -109,12 +112,20 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
} }
var chainConf params.ChainConfig var chainConf params.ChainConfig
for _, forkName := range forks { for forkName, forkNumber := range nameForkMap {
switch forkName { switch forkName {
case "shanghai":
chainConf.ShanghaiBlock = big.NewInt(forkNumber)
case "bernoulli": case "bernoulli":
chainConf.BernoulliBlock = big.NewInt(100) chainConf.BernoulliBlock = big.NewInt(forkNumber)
case "london":
chainConf.LondonBlock = big.NewInt(forkNumber)
case "istanbul":
chainConf.IstanbulBlock = big.NewInt(forkNumber)
case "homestead": case "homestead":
chainConf.HomesteadBlock = big.NewInt(0) chainConf.HomesteadBlock = big.NewInt(forkNumber)
case "eip155":
chainConf.EIP155Block = big.NewInt(forkNumber)
} }
} }
@@ -141,7 +152,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
func setEnv(t *testing.T) { func setEnv(t *testing.T) {
var err error var err error
version.Version = "v4.2.0" version.Version = "v4.1.98"
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat())) glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo) glogger.Verbosity(log.LvlInfo)
@@ -175,9 +186,14 @@ func setEnv(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
chunk = &encoding.Chunk{Blocks: []*encoding.Block{block1, block2}} chunk = &encoding.Chunk{Blocks: []*encoding.Block{block1, block2}}
assert.NoError(t, err) hardForkChunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}}
batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}} hardForkChunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}}
assert.NoError(t, err)
batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}}
hardForkBatch1 = &encoding.Batch{Index: 1, Chunks: []*encoding.Chunk{hardForkChunk1}}
hardForkBatch2 = &encoding.Batch{Index: 2, Chunks: []*encoding.Chunk{hardForkChunk2}}
} }
func TestApis(t *testing.T) { func TestApis(t *testing.T) {
@@ -192,12 +208,13 @@ func TestApis(t *testing.T) {
t.Run("TestInvalidProof", testInvalidProof) t.Run("TestInvalidProof", testInvalidProof)
t.Run("TestProofGeneratedFailed", testProofGeneratedFailed) t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
t.Run("TestTimeoutProof", testTimeoutProof) t.Run("TestTimeoutProof", testTimeoutProof)
t.Run("TestHardFork", testHardForkAssignTask)
} }
func testHandshake(t *testing.T) { func testHandshake(t *testing.T) {
// Setup coordinator and http server. // Setup coordinator and http server.
coordinatorURL := randomURL() coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"}) proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
defer func() { defer func() {
proofCollector.Stop() proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background())) assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -210,7 +227,7 @@ func testHandshake(t *testing.T) {
func testFailedHandshake(t *testing.T) { func testFailedHandshake(t *testing.T) {
// Setup coordinator and http server. // Setup coordinator and http server.
coordinatorURL := randomURL() coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"homestead"}) proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
defer func() { defer func() {
proofCollector.Stop() proofCollector.Stop()
}() }()
@@ -228,7 +245,7 @@ func testFailedHandshake(t *testing.T) {
func testGetTaskBlocked(t *testing.T) { func testGetTaskBlocked(t *testing.T) {
coordinatorURL := randomURL() coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"}) collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
defer func() { defer func() {
collector.Stop() collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background())) assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -244,14 +261,14 @@ func testGetTaskBlocked(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion) expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk) code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code) assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, errors.New(errMsg)) assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
expectedErr = errors.New("get empty prover task") expectedErr = fmt.Errorf("get empty prover task")
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch) code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code) assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
assert.Equal(t, expectedErr, errors.New(errMsg)) assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
err = proverBlockListOrm.InsertProverPublicKey(context.Background(), batchProver.proverName, batchProver.publicKey()) err = proverBlockListOrm.InsertProverPublicKey(context.Background(), batchProver.proverName, batchProver.publicKey())
assert.NoError(t, err) assert.NoError(t, err)
@@ -259,20 +276,20 @@ func testGetTaskBlocked(t *testing.T) {
err = proverBlockListOrm.DeleteProverPublicKey(context.Background(), chunkProver.publicKey()) err = proverBlockListOrm.DeleteProverPublicKey(context.Background(), chunkProver.publicKey())
assert.NoError(t, err) assert.NoError(t, err)
expectedErr = errors.New("get empty prover task") expectedErr = fmt.Errorf("get empty prover task")
code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk) code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code) assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
assert.Equal(t, expectedErr, errors.New(errMsg)) assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion) expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch) code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code) assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, errors.New(errMsg)) assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
} }
func testOutdatedProverVersion(t *testing.T) { func testOutdatedProverVersion(t *testing.T) {
coordinatorURL := randomURL() coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"}) collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
defer func() { defer func() {
collector.Stop() collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background())) assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -284,22 +301,252 @@ func testOutdatedProverVersion(t *testing.T) {
batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999") batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999")
assert.True(t, chunkProver.healthCheckSuccess(t)) assert.True(t, chunkProver.healthCheckSuccess(t))
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion)
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, chunkProver.proverVersion) code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk) assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, types.ErrJWTCommonErr, code) assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
assert.Equal(t, expectedErr, errors.New(errMsg))
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion)
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, batchProver.proverVersion) code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch) assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, types.ErrJWTCommonErr, code) assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
assert.Equal(t, expectedErr, errors.New(errMsg)) }
func testHardForkAssignTask(t *testing.T) {
tests := []struct {
name string
proofType message.ProofType
forkNumbers map[string]int64
proverForkNames []string
exceptTaskNumber int
exceptGetTaskErrCodes []int
exceptGetTaskErrMsgs []string
}{
{ // hard fork 4, prover 4 block [2-3]
name: "noTaskForkChunkProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"bernoulli": forkNumberFour},
exceptTaskNumber: 0,
proverForkNames: []string{"bernoulli", "bernoulli"},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
{
name: "noTaskForkBatchProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"bernoulli": forkNumberFour},
exceptTaskNumber: 0,
proverForkNames: []string{"bernoulli", "bernoulli"},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
{ // hard fork 1, prover 1 block [2-3]
name: "noTaskForkChunkProverVersionLessThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "homestead": forkNumberOne},
exceptTaskNumber: 0,
proverForkNames: []string{"homestead", "homestead"},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
{
name: "noTaskForkBatchProverVersionLessThanHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "homestead": forkNumberOne},
exceptTaskNumber: 0,
proverForkNames: []string{"homestead", "homestead"},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
{
name: "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
exceptTaskNumber: 0,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
{ // hard fork 3, prover 3 block [2-3]
name: "oneTaskForkChunkProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"london", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
},
{
name: "oneTaskForkBatchProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"london", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
},
{ // hard fork 2, prover 2 block [2-3]
name: "oneTaskForkChunkProverVersionLessThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"istanbul", "istanbul"},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
},
{
name: "oneTaskForkBatchProverVersionLessThanHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"istanbul", "istanbul"},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
},
{ // hard fork 2, prover 2 block [2-3]
name: "twoTaskForkChunkProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo},
exceptTaskNumber: 2,
proverForkNames: []string{"istanbul", "istanbul"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{
name: "twoTaskForkBatchProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo},
exceptTaskNumber: 2,
proverForkNames: []string{"istanbul", "istanbul"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{ // hard fork 4, prover 3 block [2-3]
name: "twoTaskForkChunkProverVersionLessThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"bernoulli": forkNumberFour, "istanbul": forkNumberTwo},
exceptTaskNumber: 2,
proverForkNames: []string{"istanbul", "istanbul"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{ // hard fork 3, prover1:2 prover2:3 block [2-3]
name: "twoTaskForkChunkProverVersionMiddleHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"istanbul", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{
name: "twoTaskForkBatchProverVersionMiddleHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"istanbul", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{ // hard fork 3, prover1:2 prover2:3 block [2-3]
name: "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{
name: "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
exceptGetTaskErrMsgs: []string{"", ""},
},
{ // hard fork 2, prover 2 block [2-3]
name: "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, tt.forkNumbers)
defer func() {
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
chunkProof := &message.ChunkProof{
StorageTrace: []byte("testStorageTrace"),
Protocol: []byte("testProtocol"),
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
}
// the inserted blocks have numbers 2 and 3
// chunk1/batch1 contains block number 2
// chunk2/batch2 contains block number 3
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
dbHardForkChunk1, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk1)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 2, dbHardForkChunk1.Hash)
assert.NoError(t, err)
err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk1.Hash, chunkProof, types.ProvingTaskUnassigned, 1)
assert.NoError(t, err)
dbHardForkBatch1, err := batchOrm.InsertBatch(context.Background(), hardForkBatch1)
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, dbHardForkBatch1.Hash)
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch1.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
dbHardForkChunk2, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk2)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 3, 100, dbHardForkChunk2.Hash)
assert.NoError(t, err)
err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk2.Hash, chunkProof, types.ProvingTaskUnassigned, 1)
assert.NoError(t, err)
dbHardForkBatch2, err := batchOrm.InsertBatch(context.Background(), hardForkBatch2)
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 1, 1, dbHardForkBatch2.Hash)
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch2.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
getTaskNumber := 0
for i := 0; i < 2; i++ {
mockProver := newMockProver(t, fmt.Sprintf("mock_prover_%d", i), coordinatorURL, tt.proofType, version.Version)
proverTask, errCode, errMsg := mockProver.getProverTask(t, tt.proofType, tt.proverForkNames[i])
assert.Equal(t, tt.exceptGetTaskErrCodes[i], errCode)
assert.Equal(t, tt.exceptGetTaskErrMsgs[i], errMsg)
if errCode != types.Success {
continue
}
getTaskNumber++
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i])
}
assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
})
}
} }
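The table-driven cases above boil down to picking whichever configured fork is active at the blocks backing a task and comparing it with the fork name the prover logged in with; the coordinator's real fork resolution lives elsewhere, so the following is only a self-contained sketch of that activation rule.

// activeFork returns the name of the latest fork whose activation block is <= blockNumber,
// given the same name -> activation-block map that setupCoordinator feeds into ChainConfig.
func activeFork(nameForkMap map[string]int64, blockNumber int64) string {
	activeName, activeAt := "", int64(-1)
	for name, at := range nameForkMap {
		if at <= blockNumber && at > activeAt {
			activeName, activeAt = name, at
		}
	}
	return activeName
}

// e.g. activeFork(map[string]int64{"istanbul": 2, "london": 3}, 2) == "istanbul"
//      activeFork(map[string]int64{"istanbul": 2, "london": 3}, 3) == "london"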
func testValidProof(t *testing.T) { func testValidProof(t *testing.T) {
coordinatorURL := randomURL() coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"homestead"}) collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
defer func() { defer func() {
collector.Stop() collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background())) assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -328,12 +575,12 @@ func testValidProof(t *testing.T) {
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version) provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
exceptProofStatus := verifiedSuccess proofStatus := verifiedSuccess
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType) proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
assert.Equal(t, types.Success, errCode) assert.Equal(t, errCode, types.Success)
assert.Equal(t, "", errMsg) assert.Equal(t, errMsg, "")
assert.NotNil(t, proverTask) assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, exceptProofStatus, types.Success) provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul")
} }
// verify proof status // verify proof status
@@ -382,7 +629,7 @@ func testValidProof(t *testing.T) {
func testInvalidProof(t *testing.T) { func testInvalidProof(t *testing.T) {
// Setup coordinator and ws server. // Setup coordinator and ws server.
coordinatorURL := randomURL() coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"}) collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
defer func() { defer func() {
collector.Stop() collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background())) assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -394,69 +641,39 @@ func testInvalidProof(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash) err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err) assert.NoError(t, err)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch) batch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err) assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, dbBatch.Hash) err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbBatch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err) assert.NoError(t, err)
// create mock provers. proofType := message.ProofTypeBatch
provers := make([]*mockProver, 2) provingStatus := verifiedFailed
for i := 0; i < len(provers); i++ { expectErrCode := types.ErrCoordinatorHandleZkProofFailure
var ( prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version)
proofType message.ProofType proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul")
provingStatus proofStatus assert.NotNil(t, proverTask)
exceptCode int assert.Equal(t, errCode, types.Success)
) assert.Equal(t, errMsg, "")
prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul")
if i%2 == 0 {
proofType = message.ProofTypeChunk
provingStatus = verifiedSuccess
exceptCode = types.Success
} else {
proofType = message.ProofTypeBatch
provingStatus = verifiedFailed
exceptCode = types.ErrCoordinatorHandleZkProofFailure
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType)
assert.Equal(t, types.Success, errCode)
assert.Equal(t, "", errMsg)
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, provingStatus, exceptCode)
}
// verify proof status // verify proof status
var ( var (
tick = time.Tick(1500 * time.Millisecond) tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute) tickStop = time.Tick(time.Minute)
chunkProofStatus types.ProvingStatus
batchProofStatus types.ProvingStatus batchProofStatus types.ProvingStatus
batchActiveAttempts int16 batchActiveAttempts int16
batchMaxAttempts int16 batchMaxAttempts int16
chunkActiveAttempts int16
chunkMaxAttempts int16
) )
for { for {
select { select {
case <-tick: case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash) batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err) assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), dbBatch.Hash) if batchProofStatus == types.ProvingTaskAssigned {
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskAssigned {
return return
} }
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), dbBatch.Hash)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 1, int(batchMaxAttempts)) assert.Equal(t, 1, int(batchMaxAttempts))
assert.Equal(t, 0, int(batchActiveAttempts)) assert.Equal(t, 0, int(batchActiveAttempts))
@@ -470,7 +687,7 @@ func testInvalidProof(t *testing.T) {
func testProofGeneratedFailed(t *testing.T) { func testProofGeneratedFailed(t *testing.T) {
// Setup coordinator and ws server. // Setup coordinator and ws server.
coordinatorURL := randomURL() coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, []string{"darwinV2"}) collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
defer func() { defer func() {
collector.Stop() collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background())) assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -482,38 +699,26 @@ func testProofGeneratedFailed(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash) err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err) assert.NoError(t, err)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch) batch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err) assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, dbBatch.Hash) err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbBatch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err) assert.NoError(t, err)
// create mock provers. // create mock provers.
provers := make([]*mockProver, 2) provers := make([]*mockProver, 2)
for i := 0; i < len(provers); i++ { for i := 0; i < len(provers); i++ {
var ( var proofType message.ProofType
proofType message.ProofType
exceptCode int
exceptErrMsg string
)
if i%2 == 0 { if i%2 == 0 {
proofType = message.ProofTypeChunk proofType = message.ProofTypeChunk
exceptCode = types.Success
exceptErrMsg = ""
} else { } else {
proofType = message.ProofTypeBatch proofType = message.ProofTypeBatch
exceptCode = types.ErrCoordinatorGetTaskFailure
exceptErrMsg = "return prover task err:coordinator internal error"
} }
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version) provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType) proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
assert.NotNil(t, proverTask) assert.NotNil(t, proverTask)
assert.Equal(t, errCode, exceptCode) assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, exceptErrMsg) assert.Equal(t, errMsg, "")
if errCode == types.Success { provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul")
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure)
}
} }
// verify proof status // verify proof status
@@ -538,7 +743,7 @@ func testProofGeneratedFailed(t *testing.T) {
case <-tick: case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash) chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err) assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), dbBatch.Hash) batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err) assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned { if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
return return
@@ -549,14 +754,14 @@ func testProofGeneratedFailed(t *testing.T) {
assert.Equal(t, 1, int(chunkMaxAttempts)) assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts)) assert.Equal(t, 0, int(chunkActiveAttempts))
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), dbBatch.Hash) batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 1, int(batchMaxAttempts)) assert.Equal(t, 1, int(batchMaxAttempts))
assert.Equal(t, 0, int(batchActiveAttempts)) assert.Equal(t, 0, int(batchActiveAttempts))
chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeChunk, dbChunk.Hash) chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeChunk, dbChunk.Hash)
assert.NoError(t, err) assert.NoError(t, err)
batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeBatch, dbBatch.Hash) batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), message.ProofTypeBatch, batch.Hash)
assert.NoError(t, err) assert.NoError(t, err)
if chunkProverTaskProvingStatus == types.ProverProofInvalid && batchProverTaskProvingStatus == types.ProverProofInvalid { if chunkProverTaskProvingStatus == types.ProverProofInvalid && batchProverTaskProvingStatus == types.ProverProofInvalid {
return return
@@ -571,7 +776,7 @@ func testProofGeneratedFailed(t *testing.T) {
func testTimeoutProof(t *testing.T) { func testTimeoutProof(t *testing.T) {
// Setup coordinator and ws server. // Setup coordinator and ws server.
coordinatorURL := randomURL() coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL, []string{"darwinV2"}) collector, httpHandler := setupCoordinator(t, 1, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
defer func() { defer func() {
collector.Stop() collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background())) assert.NoError(t, httpHandler.Shutdown(context.Background()))
@@ -592,25 +797,18 @@ func testTimeoutProof(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), batch) batch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err) assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 100, batch.Hash)
assert.NoError(t, err)
encodeData, err := json.Marshal(message.ChunkProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbChunk.Hash, encodeData, types.ProvingTaskUnassigned, 1)
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady) err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err) assert.NoError(t, err)
// create first chunk & batch mock prover, that will not send any proof. // create first chunk & batch mock prover, that will not send any proof.
chunkProver1 := newMockProver(t, "prover_test"+strconv.Itoa(0), coordinatorURL, message.ProofTypeChunk, version.Version) chunkProver1 := newMockProver(t, "prover_test"+strconv.Itoa(0), coordinatorURL, message.ProofTypeChunk, version.Version)
proverChunkTask, errChunkCode, errChunkMsg := chunkProver1.getProverTask(t, message.ProofTypeChunk) proverChunkTask, errChunkCode, errChunkMsg := chunkProver1.getProverTask(t, message.ProofTypeChunk, "istanbul")
assert.NotNil(t, proverChunkTask) assert.NotNil(t, proverChunkTask)
assert.Equal(t, errChunkCode, types.Success) assert.Equal(t, errChunkCode, types.Success)
assert.Equal(t, errChunkMsg, "") assert.Equal(t, errChunkMsg, "")
batchProver1 := newMockProver(t, "prover_test"+strconv.Itoa(1), coordinatorURL, message.ProofTypeBatch, version.Version) batchProver1 := newMockProver(t, "prover_test"+strconv.Itoa(1), coordinatorURL, message.ProofTypeBatch, version.Version)
proverBatchTask, errBatchCode, errBatchMsg := batchProver1.getProverTask(t, message.ProofTypeBatch) proverBatchTask, errBatchCode, errBatchMsg := batchProver1.getProverTask(t, message.ProofTypeBatch, "istanbul")
assert.NotNil(t, proverBatchTask) assert.NotNil(t, proverBatchTask)
assert.Equal(t, errBatchCode, types.Success) assert.Equal(t, errBatchCode, types.Success)
assert.Equal(t, errBatchMsg, "") assert.Equal(t, errBatchMsg, "")
@@ -639,18 +837,18 @@ func testTimeoutProof(t *testing.T) {
// create second mock prover, that will send valid proof. // create second mock prover, that will send valid proof.
chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), coordinatorURL, message.ProofTypeChunk, version.Version) chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), coordinatorURL, message.ProofTypeChunk, version.Version)
proverChunkTask2, chunkTask2ErrCode, chunkTask2ErrMsg := chunkProver2.getProverTask(t, message.ProofTypeChunk) proverChunkTask2, chunkTask2ErrCode, chunkTask2ErrMsg := chunkProver2.getProverTask(t, message.ProofTypeChunk, "istanbul")
assert.NotNil(t, proverChunkTask2) assert.NotNil(t, proverChunkTask2)
assert.Equal(t, chunkTask2ErrCode, types.Success) assert.Equal(t, chunkTask2ErrCode, types.Success)
assert.Equal(t, chunkTask2ErrMsg, "") assert.Equal(t, chunkTask2ErrMsg, "")
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success) chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul")
batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version) batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version)
proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch) proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul")
assert.NotNil(t, proverBatchTask2) assert.NotNil(t, proverBatchTask2)
assert.Equal(t, batchTask2ErrCode, types.Success) assert.Equal(t, batchTask2ErrCode, types.Success)
assert.Equal(t, batchTask2ErrMsg, "") assert.Equal(t, batchTask2ErrMsg, "")
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success) batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul")
// verify proof status, it should be verified now, because second prover sent valid proof // verify proof status, it should be verified now, because second prover sent valid proof
chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash) chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)

View File

@@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
} }
// connectToCoordinator performs the challenge/login HTTP flow against the coordinator (prover manager).
func (r *mockProver) connectToCoordinator(t *testing.T, proverTypes []types.ProverType) (string, int, string) { func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string {
challengeString := r.challenge(t) challengeString := r.challenge(t)
return r.login(t, challengeString, proverTypes) return r.login(t, challengeString, forkName)
} }
func (r *mockProver) challenge(t *testing.T) string { func (r *mockProver) challenge(t *testing.T) string {
@@ -76,35 +76,43 @@ func (r *mockProver) challenge(t *testing.T) string {
return loginData.Token return loginData.Token
} }
func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []types.ProverType) (string, int, string) { func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string {
authMsg := types.LoginParameter{ var body string
Message: types.Message{ if forkName != "" {
Challenge: challengeString, authMsg := message.AuthMsg{
ProverName: r.proverName, Identity: &message.Identity{
ProverVersion: r.proverVersion, Challenge: challengeString,
ProverTypes: proverTypes, ProverName: r.proverName,
VKs: []string{"mock_vk"}, ProverVersion: r.proverVersion,
}, HardForkName: forkName,
PublicKey: r.publicKey(), },
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
} else {
authMsg := message.LegacyAuthMsg{
Identity: &message.LegacyIdentity{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
} }
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body, err := json.Marshal(authMsg)
assert.NoError(t, err)
var result ctypes.Response var result ctypes.Response
client := resty.New() client := resty.New()
resp, err := client.R(). resp, err := client.R().
SetHeader("Content-Type", "application/json"). SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", challengeString)). SetHeader("Authorization", fmt.Sprintf("Bearer %s", challengeString)).
SetBody(body). SetBody([]byte(body)).
SetResult(&result). SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/login") Post("http://" + r.coordinatorURL + "/coordinator/v1/login")
assert.NoError(t, err) assert.NoError(t, err)
if result.ErrCode != 0 {
return "", result.ErrCode, result.ErrMsg
}
type login struct { type login struct {
Time string `json:"time"` Time string `json:"time"`
Token string `json:"token"` Token string `json:"token"`
@@ -114,7 +122,7 @@ func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []t
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode()) assert.Equal(t, http.StatusOK, resp.StatusCode())
assert.Empty(t, result.ErrMsg) assert.Empty(t, result.ErrMsg)
return loginData.Token, 0, "" return loginData.Token
} }
func (r *mockProver) healthCheckSuccess(t *testing.T) bool { func (r *mockProver) healthCheckSuccess(t *testing.T) bool {
@@ -141,12 +149,9 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {
return true return true
} }
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*types.GetTaskSchema, int, string) { func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) {
// get task from coordinator // get task from coordinator
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)}) token := r.connectToCoordinator(t, forkName)
if errCode != 0 {
return nil, errCode, errMsg
}
assert.NotEmpty(t, token) assert.NotEmpty(t, token)
type response struct { type response struct {
@@ -160,7 +165,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
resp, err := client.R(). resp, err := client.R().
SetHeader("Content-Type", "application/json"). SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)). SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_types": []int{int(proofType)}}). SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetResult(&result). SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task") Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err) assert.NoError(t, err)
@@ -171,12 +176,9 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) (*
// Testing expected errors returned by coordinator. // Testing expected errors returned by coordinator.
// //
//nolint:unparam //nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) { func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) {
// get task from coordinator // get task from coordinator
token, errCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(proofType)}) token := r.connectToCoordinator(t, forkName)
if errCode != 0 {
return errCode, errMsg
}
assert.NotEmpty(t, token) assert.NotEmpty(t, token)
type response struct { type response struct {
@@ -199,56 +201,50 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
return result.ErrCode, result.ErrMsg return result.ErrCode, result.ErrMsg
} }
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) { func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) {
proofMsgStatus := message.StatusOk proofMsgStatus := message.StatusOk
if proofStatus == generatedFailed { if proofStatus == generatedFailed {
proofMsgStatus = message.StatusProofError proofMsgStatus = message.StatusProofError
} }
var proof []byte proof := &message.ProofMsg{
switch proverTaskSchema.TaskType { ProofDetail: &message.ProofDetail{
case int(message.ProofTypeChunk): ID: proverTaskSchema.TaskID,
encodeData, err := json.Marshal(message.ChunkProof{}) Type: message.ProofType(proverTaskSchema.TaskType),
assert.NoError(t, err) Status: proofMsgStatus,
assert.NotEmpty(t, encodeData) ChunkProof: &message.ChunkProof{},
proof = encodeData BatchProof: &message.BatchProof{},
case int(message.ProofTypeBatch): },
encodeData, err := json.Marshal(message.BatchProof{})
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
} }
if proofStatus == verifiedFailed { if proofStatus == generatedFailed {
switch proverTaskSchema.TaskType { proof.Status = message.StatusProofError
case int(message.ProofTypeChunk): } else if proofStatus == verifiedFailed {
chunkProof := message.ChunkProof{} proof.ProofDetail.ChunkProof.Proof = []byte(verifier.InvalidTestProof)
chunkProof.Proof = []byte(verifier.InvalidTestProof) proof.ProofDetail.BatchProof.Proof = []byte(verifier.InvalidTestProof)
encodeData, err := json.Marshal(&chunkProof)
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
case int(message.ProofTypeBatch):
batchProof := message.BatchProof{}
batchProof.Proof = []byte(verifier.InvalidTestProof)
encodeData, err := json.Marshal(&batchProof)
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
proof = encodeData
}
} }
assert.NoError(t, proof.Sign(r.privKey))
submitProof := types.SubmitProofParameter{ submitProof := types.SubmitProofParameter{
UUID: proverTaskSchema.UUID, TaskID: proof.ID,
TaskID: proverTaskSchema.TaskID, TaskType: int(proof.Type),
TaskType: proverTaskSchema.TaskType, Status: int(proof.Status),
Status: int(proofMsgStatus),
Proof: string(proof),
} }
token, authErrCode, errMsg := r.connectToCoordinator(t, []types.ProverType{types.MakeProverType(message.ProofType(proverTaskSchema.TaskType))}) switch proof.Type {
assert.Equal(t, authErrCode, 0) case message.ProofTypeChunk:
assert.Equal(t, errMsg, "") encodeData, err := json.Marshal(proof.ChunkProof)
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
submitProof.Proof = string(encodeData)
case message.ProofTypeBatch:
encodeData, err := json.Marshal(proof.BatchProof)
assert.NoError(t, err)
assert.NotEmpty(t, encodeData)
submitProof.Proof = string(encodeData)
}
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token) assert.NotEmpty(t, token)
submitProofData, err := json.Marshal(submitProof) submitProofData, err := json.Marshal(submitProof)
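
For readers following the test change above, here is a compact, hypothetical sketch of the revised submission path: build a `message.ProofMsg`, sign it, marshal the chunk proof, and wrap the result in `types.SubmitProofParameter`. The import paths, helper name, task ID, and key handling are assumptions for illustration only and may differ from the repository layout.

```go
// Sketch only: condenses the revised submitProof flow shown in the hunk above.
// Import paths and the helper name are assumed; adjust to the repo layout.
package sketch

import (
	"crypto/ecdsa"
	"encoding/json"

	"scroll-tech/common/types/message"
	"scroll-tech/coordinator/internal/types"
)

func buildSubmitParameter(taskID string, privKey *ecdsa.PrivateKey) (*types.SubmitProofParameter, error) {
	proof := &message.ProofMsg{
		ProofDetail: &message.ProofDetail{
			ID:         taskID,
			Type:       message.ProofTypeChunk,
			Status:     message.StatusOk,
			ChunkProof: &message.ChunkProof{},
			BatchProof: &message.BatchProof{},
		},
	}
	// The test signs the message before marshalling the proof payload.
	if err := proof.Sign(privKey); err != nil {
		return nil, err
	}
	encoded, err := json.Marshal(proof.ChunkProof)
	if err != nil {
		return nil, err
	}
	return &types.SubmitProofParameter{
		TaskID:   proof.ID,
		TaskType: int(proof.Type),
		Status:   int(proof.Status),
		Proof:    string(encoded),
	}, nil
}
```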

View File

@@ -4,8 +4,6 @@ import (
"encoding/json" "encoding/json"
"os" "os"
"path/filepath" "path/filepath"
"scroll-tech/common/utils"
) )
// DBConfig db config // DBConfig db config
@@ -31,11 +29,5 @@ func NewConfig(file string) (*DBConfig, error) {
return nil, err return nil, err
} }
// Override config with environment variables
err = utils.OverrideConfigWithEnv(cfg, "SCROLL_ROLLUP_DB_CONFIG")
if err != nil {
return nil, err
}
return cfg, nil return cfg, nil
} }

View File

@@ -1,5 +1,5 @@
{ {
"dsn": "postgres://localhost/scroll?sslmode=disable", "dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",
"driver_name": "postgres", "driver_name": "postgres",
"maxOpenNum": 200, "maxOpenNum": 200,
"maxIdleNum": 20 "maxIdleNum": 20

View File

@@ -6,7 +6,7 @@ require (
github.com/jmoiron/sqlx v1.3.5 github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.9 github.com/lib/pq v1.10.9
github.com/pressly/goose/v3 v3.16.0 github.com/pressly/goose/v3 v3.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/stretchr/testify v1.9.0 github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7 github.com/urfave/cli/v2 v2.25.7
) )
@@ -33,11 +33,11 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.24.0 // indirect golang.org/x/crypto v0.19.0 // indirect
golang.org/x/net v0.25.0 // indirect golang.org/x/mod v0.16.0 // indirect
golang.org/x/sync v0.7.0 // indirect golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.21.0 // indirect golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.16.0 // indirect golang.org/x/tools v0.17.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/protobuf v1.33.0 // indirect google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect

View File

@@ -121,8 +121,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
@@ -155,20 +155,20 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=

View File

@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB) cur, err := Current(pgDB)
assert.NoError(t, err) assert.NoError(t, err)
// total number of tables. // total number of tables.
assert.Equal(t, int64(24), cur) assert.Equal(t, int64(20), cur)
} }
func testMigrate(t *testing.T) { func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB)) assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB) cur, err := Current(pgDB)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, int64(24), cur) assert.Equal(t, int64(20), cur)
} }
func testRollback(t *testing.T) { func testRollback(t *testing.T) {
version, err := Current(pgDB) version, err := Current(pgDB)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, int64(24), version) assert.Equal(t, int64(20), version)
assert.NoError(t, Rollback(pgDB, nil)) assert.NoError(t, Rollback(pgDB, nil))
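
The expected version in these assertions drops from 24 to 20, consistent with the four migration files removed later in this diff. For orientation, here is a hedged equivalent of the version check using pressly/goose/v3 directly; the repository's own Migrate/Current wrappers are not reproduced, and the `*sql.DB` handle and "migrations" directory are placeholders.

```go
// Hedged sketch: applies migrations and checks the resulting version with
// pressly/goose/v3, roughly what testMigrate asserts above.
package migrate

import (
	"database/sql"
	"fmt"

	"github.com/pressly/goose/v3"
)

func assertMigratedTo(db *sql.DB, want int64) error {
	if err := goose.Up(db, "migrations"); err != nil {
		return err
	}
	current, err := goose.GetDBVersion(db)
	if err != nil {
		return err
	}
	if current != want {
		return fmt.Errorf("unexpected migration version: got %d, want %d", current, want)
	}
	return nil
}
```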

View File

@@ -1,53 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE bundle (
index BIGSERIAL PRIMARY KEY,
hash VARCHAR NOT NULL, -- Not part of DA hash, used for SQL query consistency and ease of use, derived using keccak256(concat(start_batch_hash_bytes, end_batch_hash_bytes)).
start_batch_index BIGINT NOT NULL,
end_batch_index BIGINT NOT NULL,
start_batch_hash VARCHAR NOT NULL,
end_batch_hash VARCHAR NOT NULL,
codec_version SMALLINT NOT NULL,
-- proof
batch_proofs_status SMALLINT NOT NULL DEFAULT 1,
proving_status SMALLINT NOT NULL DEFAULT 1,
proof BYTEA DEFAULT NULL,
proved_at TIMESTAMP(0) DEFAULT NULL,
proof_time_sec INTEGER DEFAULT NULL,
total_attempts SMALLINT NOT NULL DEFAULT 0,
active_attempts SMALLINT NOT NULL DEFAULT 0,
-- rollup
rollup_status SMALLINT NOT NULL DEFAULT 1,
finalize_tx_hash VARCHAR DEFAULT NULL,
finalized_at TIMESTAMP(0) DEFAULT NULL,
-- metadata
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);
CREATE INDEX idx_bundle_index_rollup_status ON bundle(index, rollup_status) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_hash ON bundle(hash) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_hash_proving_status ON bundle(hash, proving_status) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_index_desc ON bundle(index DESC) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_batch_proofs_status ON bundle(batch_proofs_status) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_start_batch_index ON bundle(start_batch_index) WHERE deleted_at IS NULL;
CREATE INDEX idx_bundle_end_batch_index ON bundle(end_batch_index) WHERE deleted_at IS NULL;
create index idx_bundle_total_attempts_active_attempts_batch_proofs_status
on bundle (total_attempts, active_attempts, batch_proofs_status)
where deleted_at IS NULL;
COMMENT ON COLUMN bundle.batch_proofs_status IS 'undefined, pending, ready';
COMMENT ON COLUMN bundle.proving_status IS 'undefined, unassigned, assigned, proved (deprecated), verified, failed';
COMMENT ON COLUMN bundle.rollup_status IS 'undefined, pending, committing (not used for bundles), committed (not used for bundles), finalizing, finalized, commit_failed (not used for bundles), finalize_failed';
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP TABLE IF EXISTS bundle;
-- +goose StatementEnd

View File

@@ -1,23 +0,0 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE batch
ADD COLUMN bundle_hash VARCHAR DEFAULT '',
ADD COLUMN codec_version SMALLINT DEFAULT 0;
CREATE INDEX idx_batch_bundle_hash ON batch(bundle_hash);
CREATE INDEX idx_batch_index_codec_version ON batch(index, codec_version);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
DROP INDEX IF EXISTS idx_batch_bundle_hash;
DROP INDEX IF EXISTS idx_batch_index_codec_version;
ALTER TABLE IF EXISTS batch
DROP COLUMN IF EXISTS bundle_hash,
DROP COLUMN IF EXISTS codec_version;
-- +goose StatementEnd

View File

@@ -1,23 +0,0 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE chunk
ADD COLUMN codec_version SMALLINT NOT NULL DEFAULT 0,
ADD COLUMN enable_compress BOOLEAN NOT NULL DEFAULT false;
ALTER TABLE batch
ADD COLUMN enable_compress BOOLEAN NOT NULL DEFAULT false;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS chunk
DROP COLUMN IF EXISTS enable_compress,
DROP COLUMN IF EXISTS codec_version;
ALTER TABLE IF EXISTS batch
DROP COLUMN IF EXISTS enable_compress;
-- +goose StatementEnd

View File

@@ -1,15 +0,0 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE batch
ADD COLUMN blob_bytes BYTEA;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS batch
DROP COLUMN IF EXISTS blob_bytes;
-- +goose StatementEnd

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff