Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 07:28:08 -05:00)

Compare commits: fix_deploy ... v4.3.95 (41 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | da4f6818e3 |  |
|  | 200ca7c15b |  |
|  | 53cf26597d |  |
|  | f0f7341271 |  |
|  | b6af88c936 |  |
|  | de541a650a |  |
|  | d7a57235d3 |  |
|  | 91d21301ec |  |
|  | 4b32a44a70 |  |
|  | 55b400c5fb |  |
|  | 1b49091207 |  |
|  | 5b827c3c18 |  |
|  | 6b2eb80aa5 |  |
|  | 71f88b04f5 |  |
|  | bcd9764bcd |  |
|  | b4f8377a08 |  |
|  | b52d43caa8 |  |
|  | 201bf401cd |  |
|  | 898ac1d25c |  |
|  | 1336b89fb8 |  |
|  | 73045df037 |  |
|  | b3093e9eb6 |  |
|  | 3d5250e52d |  |
|  | b7324c76bc |  |
|  | 6d6e98bd6e |  |
|  | 9e35ce0ab4 |  |
|  | b86ebaefaf |  |
|  | 78a4298eda |  |
|  | 49d8387714 |  |
|  | af2913903b |  |
|  | f8a7d70872 |  |
|  | 790fc44b40 |  |
|  | 620c71b16d |  |
|  | ed0e0e4c18 |  |
|  | d203033e13 |  |
|  | 7d45926687 |  |
|  | 5362e28f74 |  |
|  | e8eb7ff8fd |  |
|  | b01b5819da |  |
|  | cb09024821 |  |
|  | 8bd4277c13 |  |
.github/workflows/common.yml (vendored, 2 lines changed)

@@ -29,7 +29,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2022-12-10
+          toolchain: nightly-2023-12-03
          override: true
          components: rustfmt, clippy
      - name: Install Go
.github/workflows/coordinator.yml (vendored, 8 lines changed)

@@ -33,13 +33,13 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2022-12-10
+          toolchain: nightly-2023-12-03
          override: true
          components: rustfmt, clippy
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.20.x
+          go-version: 1.21.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Lint
@@ -54,7 +54,7 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.20.x
+          go-version: 1.21.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install goimports
@@ -95,7 +95,7 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.20.x
+          go-version: 1.21.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install Solc
.github/workflows/docker.yml (vendored, 7 lines changed)

@@ -46,6 +46,7 @@ jobs:
        with:
          context: .
          file: ./build/dockerfiles/event_watcher.Dockerfile
+          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -90,6 +91,7 @@ jobs:
        with:
          context: .
          file: ./build/dockerfiles/gas_oracle.Dockerfile
+          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -134,6 +136,7 @@ jobs:
        with:
          context: .
          file: ./build/dockerfiles/rollup_relayer.Dockerfile
+          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -178,6 +181,7 @@ jobs:
        with:
          context: .
          file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
+          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -222,6 +226,7 @@ jobs:
        with:
          context: .
          file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
+          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -266,6 +271,7 @@ jobs:
        with:
          context: .
          file: ./build/dockerfiles/coordinator-api.Dockerfile
+          platforms: linux/amd64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -310,6 +316,7 @@ jobs:
        with:
          context: .
          file: ./build/dockerfiles/coordinator-cron.Dockerfile
+          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
.github/workflows/intermediate-docker.yml (vendored, 4 lines changed)

@@ -7,12 +7,12 @@ on:
        description: 'Go version'
        required: true
        type: string
-        default: '1.20'
+        default: '1.21'
      RUST_VERSION:
        description: 'Rust toolchain version'
        required: true
        type: string
-        default: 'nightly-2022-12-10'
+        default: 'nightly-2023-12-03'
      PYTHON_VERSION:
        description: 'Python version'
        required: false

.github/workflows/prover.yml (vendored, 2 lines changed)

@@ -50,7 +50,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2022-12-10
+          toolchain: nightly-2023-12-03
          override: true
          components: rustfmt, clippy
      - name: Install Go
@@ -8,7 +8,7 @@ require (
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/pressly/goose/v3 v3.16.0
 	github.com/prometheus/client_golang v1.16.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
 	github.com/stretchr/testify v1.9.0
 	github.com/urfave/cli/v2 v2.25.7
 	golang.org/x/sync v0.6.0
@@ -60,6 +60,7 @@ require (
 	github.com/holiman/uint256 v1.2.4 // indirect
 	github.com/huin/goupnp v1.3.0 // indirect
 	github.com/iden3/go-iden3-crypto v0.0.15 // indirect
+	github.com/jackc/pgx/v5 v5.5.4 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
@@ -184,8 +184,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
|
||||
github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
|
||||
github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
|
||||
github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
|
||||
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
|
||||
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
@@ -311,8 +311,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935 h1:bHBt6sillaT4o/9RjxkVX8pWwvEmu37uWBw4XbCjfzY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
|
||||
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
|
||||
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
|
||||
|
||||
@@ -23,6 +23,7 @@ type FetcherConfig struct {
 	DAIGatewayAddr     string `json:"DAIGatewayAddr"`
 	USDCGatewayAddr    string `json:"USDCGatewayAddr"`
 	LIDOGatewayAddr    string `json:"LIDOGatewayAddr"`
+	PufferGatewayAddr  string `json:"PufferGatewayAddr"`
 	ERC721GatewayAddr  string `json:"ERC721GatewayAddr"`
 	ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
 	ScrollChainAddr    string `json:"ScrollChainAddr"`
@@ -93,6 +93,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
 	}
 
+	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
+	}
+
 	log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
 
 	f := &L1FetcherLogic{
@@ -85,7 +85,12 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 
 	if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
 		addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
-		gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
 	}
 
+	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
+	}
+
 	log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
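The two fetcher hunks above follow one pattern: a gateway is only added to the watch lists when its configured address decodes to a non-zero value, and the L2 hunk also corrects the LIDO branch, which previously appended the USDC address to the gateway list. Below is a minimal sketch of that zero-address guard; the helper name `appendIfConfigured` is illustrative and not part of the diff.

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

// appendIfConfigured adds a gateway address to both lists only when the
// configured hex string decodes to a non-zero address, mirroring the checks
// added for PufferGatewayAddr in the hunks above.
func appendIfConfigured(addrHex string, addressList, gatewayList []common.Address) ([]common.Address, []common.Address) {
	addr := common.HexToAddress(addrHex)
	if addr != (common.Address{}) { // the zero address means "not configured"
		addressList = append(addressList, addr)
		gatewayList = append(gatewayList, addr)
	}
	return addressList, gatewayList
}

func main() {
	var addressList, gatewayList []common.Address
	// An empty config value decodes to the zero address and is skipped.
	addressList, gatewayList = appendIfConfigured("", addressList, gatewayList)
	// A hypothetical configured gateway address is kept.
	addressList, gatewayList = appendIfConfigured("0x0000000000000000000000000000000000000001", addressList, gatewayList)
	fmt.Println(len(addressList), len(gatewayList)) // 1 1
}
```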
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM golang:1.20-alpine3.16 as base
|
||||
FROM golang:1.21-alpine3.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.mod* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM golang:1.20-alpine3.16 as base
|
||||
FROM golang:1.21-alpine3.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY ./bridge-history-api/go.* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM golang:1.20-alpine3.16 as base
|
||||
FROM golang:1.21-alpine3.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.mod* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Build libzkp dependency
|
||||
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as chef
|
||||
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
|
||||
WORKDIR app
|
||||
|
||||
FROM chef as planner
|
||||
@@ -17,7 +17,7 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
|
||||
|
||||
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as base
|
||||
FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./rollup/go.* ./rollup/
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
FROM scrolltech/go-alpine-builder:1.21 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
FROM scrolltech/go-alpine-builder:1.21 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
FROM scrolltech/go-alpine-builder:1.21 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
FROM scrolltech/go-alpine-builder:1.21 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
ifeq ($(GO_VERSION),)
|
||||
GO_VERSION=1.20
|
||||
GO_VERSION=1.21
|
||||
endif
|
||||
ifeq ($(RUST_VERSION),)
|
||||
RUST_VERSION=nightly-2022-12-10
|
||||
RUST_VERSION=nightly-2023-12-03
|
||||
endif
|
||||
ifeq ($(PYTHON_VERSION),)
|
||||
PYTHON_VERSION=3.10
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
ARG CUDA_VERSION=11.7.1
|
||||
ARG GO_VERSION=1.20
|
||||
ARG RUST_VERSION=nightly-2022-12-10
|
||||
ARG GO_VERSION=1.21
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG GO_VERSION=1.20
|
||||
ARG GO_VERSION=1.21
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
ARG GO_VERSION=1.20
|
||||
ARG RUST_VERSION=nightly-2022-12-10
|
||||
ARG GO_VERSION=1.21
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
FROM golang:${GO_VERSION}-alpine
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
ARG GO_VERSION=1.20
|
||||
ARG RUST_VERSION=nightly-2022-12-10
|
||||
ARG GO_VERSION=1.21
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
FROM ubuntu:20.04
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
ARG ALPINE_VERSION=3.15
|
||||
ARG RUST_VERSION=nightly-2022-12-10
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
FROM alpine:${ALPINE_VERSION}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG RUST_VERSION=nightly-2022-12-10
|
||||
ARG RUST_VERSION=nightly-2023-12-03
|
||||
ARG CARGO_CHEF_TAG=0.1.41
|
||||
|
||||
FROM ubuntu:20.04
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.20 as base
|
||||
FROM scrolltech/go-alpine-builder:1.21 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
|
||||
@@ -1,62 +1,27 @@
-package database
+package database_test
 
 import (
-	"context"
-	"errors"
-	"io"
-	"os"
 	"testing"
-	"time"
 
-	"github.com/mattn/go-colorable"
-	"github.com/mattn/go-isatty"
-	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/stretchr/testify/assert"
 
-	"scroll-tech/common/docker"
+	"scroll-tech/common/database"
+	"scroll-tech/common/testcontainers"
 	"scroll-tech/common/version"
 )
 
-func TestGormLogger(t *testing.T) {
-	output := io.Writer(os.Stderr)
-	usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
-	if usecolor {
-		output = colorable.NewColorableStderr()
-	}
-	ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
-	glogger := log.NewGlogHandler(ostream)
-	// Set log level
-	glogger.Verbosity(log.LvlTrace)
-	log.Root().SetHandler(glogger)
-
-	var gl gormLogger
-	gl.gethLogger = log.Root()
-
-	gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
-	gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
-	gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
-	gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
-}
-
 func TestDB(t *testing.T) {
 	version.Version = "v4.1.98-aaa-bbb-ccc"
-	base := docker.NewDockerApp()
-	base.RunDBImage(t)
-
-	dbCfg := &Config{
-		DSN:        base.DBConfig.DSN,
-		DriverName: base.DBConfig.DriverName,
-		MaxOpenNum: base.DBConfig.MaxOpenNum,
-		MaxIdleNum: base.DBConfig.MaxIdleNum,
-	}
+	testApps := testcontainers.NewTestcontainerApps()
+	assert.NoError(t, testApps.StartPostgresContainer())
 
 	var err error
-	db, err := InitDB(dbCfg)
+	db, err := testApps.GetGormDBClient()
 	assert.NoError(t, err)
 
-	sqlDB, err := Ping(db)
+	sqlDB, err := database.Ping(db)
 	assert.NoError(t, err)
 	assert.NotNil(t, sqlDB)
 
-	assert.NoError(t, CloseDB(db))
+	assert.NoError(t, database.CloseDB(db))
 }
common/database/logger_test.go (new file, 35 lines)

@@ -0,0 +1,35 @@
package database

import (
	"context"
	"errors"
	"io"
	"os"
	"testing"
	"time"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
	"github.com/scroll-tech/go-ethereum/log"
)

func TestGormLogger(t *testing.T) {
	output := io.Writer(os.Stderr)
	usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
	if usecolor {
		output = colorable.NewColorableStderr()
	}
	ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
	glogger := log.NewGlogHandler(ostream)
	// Set log level
	glogger.Verbosity(log.LvlTrace)
	log.Root().SetHandler(glogger)

	var gl gormLogger
	gl.gethLogger = log.Root()

	gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
	gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
	gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
	gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
}
@@ -19,7 +19,7 @@ CAPELLA_FORK_VERSION: 0x20000092
 MAX_WITHDRAWALS_PER_PAYLOAD: 16
 
 # Deneb
-DENEB_FORK_EPOCH: 1
+DENEB_FORK_EPOCH: 0
 DENEB_FORK_VERSION: 0x20000093
 
 # Time parameters

@@ -19,7 +19,7 @@ services:
     command:
       - testnet
       - generate-genesis
-      - --fork=capella
+      - --fork=deneb
      - --num-validators=64
      - --genesis-time-delay=3
      - --output-ssz=/data/consensus/genesis.ssz
@@ -1,196 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/database"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
l1StartPort = 10000
|
||||
l2StartPort = 20000
|
||||
dbStartPort = 30000
|
||||
)
|
||||
|
||||
// AppAPI app interface.
|
||||
type AppAPI interface {
|
||||
IsRunning() bool
|
||||
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
|
||||
RunApp(waitResult func() bool)
|
||||
WaitExit()
|
||||
ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
|
||||
}
|
||||
|
||||
// App is collection struct of runtime docker images
|
||||
type App struct {
|
||||
L1gethImg GethImgInstance
|
||||
L2gethImg GethImgInstance
|
||||
DBImg ImgInstance
|
||||
|
||||
dbClient *sql.DB
|
||||
DBConfig *database.DBConfig
|
||||
DBConfigFile string
|
||||
|
||||
// common time stamp.
|
||||
Timestamp int
|
||||
}
|
||||
|
||||
// NewDockerApp returns new instance of dockerApp struct
|
||||
func NewDockerApp() *App {
|
||||
timestamp := time.Now().Nanosecond()
|
||||
app := &App{
|
||||
Timestamp: timestamp,
|
||||
L1gethImg: newTestL1Docker(),
|
||||
L2gethImg: newTestL2Docker(),
|
||||
DBImg: newTestDBDocker("postgres"),
|
||||
DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
|
||||
}
|
||||
if err := app.mockDBConfig(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return app
|
||||
}
|
||||
|
||||
// RunImages runs all images togather
|
||||
func (b *App) RunImages(t *testing.T) {
|
||||
b.RunDBImage(t)
|
||||
b.RunL1Geth(t)
|
||||
b.RunL2Geth(t)
|
||||
}
|
||||
|
||||
// RunDBImage starts postgres docker container.
|
||||
func (b *App) RunDBImage(t *testing.T) {
|
||||
if b.DBImg.IsRunning() {
|
||||
return
|
||||
}
|
||||
assert.NoError(t, b.DBImg.Start())
|
||||
|
||||
// try 5 times until the db is ready.
|
||||
ok := utils.TryTimes(10, func() bool {
|
||||
db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
|
||||
return err == nil && db != nil && db.Ping() == nil
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
// Free clear all running images, double check and recycle docker container.
|
||||
func (b *App) Free() {
|
||||
if b.L1gethImg.IsRunning() {
|
||||
_ = b.L1gethImg.Stop()
|
||||
}
|
||||
if b.L2gethImg.IsRunning() {
|
||||
_ = b.L2gethImg.Stop()
|
||||
}
|
||||
if b.DBImg.IsRunning() {
|
||||
_ = b.DBImg.Stop()
|
||||
_ = os.Remove(b.DBConfigFile)
|
||||
if !utils.IsNil(b.dbClient) {
|
||||
_ = b.dbClient.Close()
|
||||
b.dbClient = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RunL1Geth starts l1geth docker container.
|
||||
func (b *App) RunL1Geth(t *testing.T) {
|
||||
if b.L1gethImg.IsRunning() {
|
||||
return
|
||||
}
|
||||
assert.NoError(t, b.L1gethImg.Start())
|
||||
}
|
||||
|
||||
// L1Client returns a ethclient by dialing running l1geth
|
||||
func (b *App) L1Client() (*ethclient.Client, error) {
|
||||
if utils.IsNil(b.L1gethImg) {
|
||||
return nil, fmt.Errorf("l1 geth is not running")
|
||||
}
|
||||
client, err := ethclient.Dial(b.L1gethImg.Endpoint())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// RunL2Geth starts l2geth docker container.
|
||||
func (b *App) RunL2Geth(t *testing.T) {
|
||||
if b.L2gethImg.IsRunning() {
|
||||
return
|
||||
}
|
||||
assert.NoError(t, b.L2gethImg.Start())
|
||||
}
|
||||
|
||||
// L2Client returns a ethclient by dialing running l2geth
|
||||
func (b *App) L2Client() (*ethclient.Client, error) {
|
||||
if utils.IsNil(b.L2gethImg) {
|
||||
return nil, fmt.Errorf("l2 geth is not running")
|
||||
}
|
||||
client, err := ethclient.Dial(b.L2gethImg.Endpoint())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// DBClient create and return *sql.DB instance.
|
||||
func (b *App) DBClient(t *testing.T) *sql.DB {
|
||||
if !utils.IsNil(b.dbClient) {
|
||||
return b.dbClient
|
||||
}
|
||||
var (
|
||||
cfg = b.DBConfig
|
||||
err error
|
||||
)
|
||||
b.dbClient, err = sql.Open(cfg.DriverName, cfg.DSN)
|
||||
assert.NoError(t, err)
|
||||
b.dbClient.SetMaxOpenConns(cfg.MaxOpenNum)
|
||||
b.dbClient.SetMaxIdleConns(cfg.MaxIdleNum)
|
||||
assert.NoError(t, b.dbClient.Ping())
|
||||
return b.dbClient
|
||||
}
|
||||
|
||||
func (b *App) mockDBConfig() error {
|
||||
b.DBConfig = &database.DBConfig{
|
||||
DSN: "",
|
||||
DriverName: "postgres",
|
||||
MaxOpenNum: 200,
|
||||
MaxIdleNum: 20,
|
||||
}
|
||||
|
||||
if b.DBImg != nil {
|
||||
b.DBConfig.DSN = b.DBImg.Endpoint()
|
||||
}
|
||||
data, err := json.Marshal(b.DBConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
|
||||
}
|
||||
|
||||
func newTestL1Docker() GethImgInstance {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
return NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
|
||||
}
|
||||
|
||||
func newTestL2Docker() GethImgInstance {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
return NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
|
||||
}
|
||||
|
||||
func newTestDBDocker(driverName string) ImgInstance {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
return NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
|
||||
}
|
||||
@@ -1,131 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
// ImgDB the postgres image manager.
|
||||
type ImgDB struct {
|
||||
image string
|
||||
name string
|
||||
id string
|
||||
|
||||
dbName string
|
||||
port int
|
||||
password string
|
||||
|
||||
running bool
|
||||
cmd *cmd.Cmd
|
||||
}
|
||||
|
||||
// NewImgDB return postgres db img instance.
|
||||
func NewImgDB(image, password, dbName string, port int) ImgInstance {
|
||||
img := &ImgDB{
|
||||
image: image,
|
||||
name: fmt.Sprintf("%s-%s_%d", image, dbName, port),
|
||||
password: password,
|
||||
dbName: dbName,
|
||||
port: port,
|
||||
}
|
||||
img.cmd = cmd.NewCmd("docker", img.prepare()...)
|
||||
return img
|
||||
}
|
||||
|
||||
// Start postgres db container.
|
||||
func (i *ImgDB) Start() error {
|
||||
id := GetContainerID(i.name)
|
||||
if id != "" {
|
||||
return fmt.Errorf("container already exist, name: %s", i.name)
|
||||
}
|
||||
i.running = i.isOk()
|
||||
if !i.running {
|
||||
_ = i.Stop()
|
||||
return fmt.Errorf("failed to start image: %s", i.image)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop the container.
|
||||
func (i *ImgDB) Stop() error {
|
||||
if !i.running {
|
||||
return nil
|
||||
}
|
||||
i.running = false
|
||||
|
||||
ctx := context.Background()
|
||||
// stop the running container.
|
||||
if i.id == "" {
|
||||
i.id = GetContainerID(i.name)
|
||||
}
|
||||
|
||||
timeoutSec := 3
|
||||
timeout := container.StopOptions{
|
||||
Timeout: &timeoutSec,
|
||||
}
|
||||
if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
// remove the stopped container.
|
||||
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
|
||||
}
|
||||
|
||||
// Endpoint return the dsn.
|
||||
func (i *ImgDB) Endpoint() string {
|
||||
return fmt.Sprintf("postgres://postgres:%s@localhost:%d/%s?sslmode=disable", i.password, i.port, i.dbName)
|
||||
}
|
||||
|
||||
// IsRunning returns docker container's running status.
|
||||
func (i *ImgDB) IsRunning() bool {
|
||||
return i.running
|
||||
}
|
||||
|
||||
func (i *ImgDB) prepare() []string {
|
||||
cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
|
||||
envs := []string{
|
||||
"-e", "POSTGRES_PASSWORD=" + i.password,
|
||||
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
|
||||
}
|
||||
|
||||
cmd = append(cmd, envs...)
|
||||
return append(cmd, i.image)
|
||||
}
|
||||
|
||||
func (i *ImgDB) isOk() bool {
|
||||
keyword := "database system is ready to accept connections"
|
||||
okCh := make(chan struct{}, 1)
|
||||
i.cmd.RegistFunc(keyword, func(buf string) {
|
||||
if strings.Contains(buf, keyword) {
|
||||
select {
|
||||
case okCh <- struct{}{}:
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
defer i.cmd.UnRegistFunc(keyword)
|
||||
// Start cmd in parallel.
|
||||
i.cmd.RunCmd(true)
|
||||
|
||||
select {
|
||||
case <-okCh:
|
||||
utils.TryTimes(20, func() bool {
|
||||
i.id = GetContainerID(i.name)
|
||||
return i.id != ""
|
||||
})
|
||||
case err := <-i.cmd.ErrChan:
|
||||
if err != nil {
|
||||
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
|
||||
}
|
||||
case <-time.After(time.Second * 20):
|
||||
return false
|
||||
}
|
||||
return i.id != ""
|
||||
}
|
||||
@@ -1,174 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
// ImgGeth the geth image manager include l1geth and l2geth.
|
||||
type ImgGeth struct {
|
||||
image string
|
||||
name string
|
||||
id string
|
||||
|
||||
volume string
|
||||
ipcPath string
|
||||
httpPort int
|
||||
wsPort int
|
||||
chainID *big.Int
|
||||
|
||||
running bool
|
||||
cmd *cmd.Cmd
|
||||
}
|
||||
|
||||
// NewImgGeth return geth img instance.
|
||||
func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
|
||||
img := &ImgGeth{
|
||||
image: image,
|
||||
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
|
||||
volume: volume,
|
||||
ipcPath: ipc,
|
||||
httpPort: hPort,
|
||||
wsPort: wPort,
|
||||
}
|
||||
img.cmd = cmd.NewCmd("docker", img.params()...)
|
||||
return img
|
||||
}
|
||||
|
||||
// Start run image and check if it is running healthily.
|
||||
func (i *ImgGeth) Start() error {
|
||||
id := GetContainerID(i.name)
|
||||
if id != "" {
|
||||
return fmt.Errorf("container already exist, name: %s", i.name)
|
||||
}
|
||||
i.running = i.isOk()
|
||||
if !i.running {
|
||||
_ = i.Stop()
|
||||
return fmt.Errorf("failed to start image: %s", i.image)
|
||||
}
|
||||
|
||||
// try 10 times to get chainID until is ok.
|
||||
utils.TryTimes(10, func() bool {
|
||||
client, err := ethclient.Dial(i.Endpoint())
|
||||
if err == nil && client != nil {
|
||||
i.chainID, err = client.ChainID(context.Background())
|
||||
return err == nil && i.chainID != nil
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsRunning returns docker container's running status.
|
||||
func (i *ImgGeth) IsRunning() bool {
|
||||
return i.running
|
||||
}
|
||||
|
||||
// Endpoint return the connection endpoint.
|
||||
func (i *ImgGeth) Endpoint() string {
|
||||
switch true {
|
||||
case i.httpPort != 0:
|
||||
return fmt.Sprintf("http://127.0.0.1:%d", i.httpPort)
|
||||
case i.wsPort != 0:
|
||||
return fmt.Sprintf("ws://127.0.0.1:%d", i.wsPort)
|
||||
default:
|
||||
return i.ipcPath
|
||||
}
|
||||
}
|
||||
|
||||
// ChainID return chainID.
|
||||
func (i *ImgGeth) ChainID() *big.Int {
|
||||
return i.chainID
|
||||
}
|
||||
|
||||
func (i *ImgGeth) isOk() bool {
|
||||
keyword := "WebSocket enabled"
|
||||
okCh := make(chan struct{}, 1)
|
||||
i.cmd.RegistFunc(keyword, func(buf string) {
|
||||
if strings.Contains(buf, keyword) {
|
||||
select {
|
||||
case okCh <- struct{}{}:
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
defer i.cmd.UnRegistFunc(keyword)
|
||||
// Start cmd in parallel.
|
||||
i.cmd.RunCmd(true)
|
||||
|
||||
select {
|
||||
case <-okCh:
|
||||
utils.TryTimes(20, func() bool {
|
||||
i.id = GetContainerID(i.name)
|
||||
return i.id != ""
|
||||
})
|
||||
case err := <-i.cmd.ErrChan:
|
||||
if err != nil {
|
||||
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
|
||||
}
|
||||
case <-time.After(time.Second * 10):
|
||||
return false
|
||||
}
|
||||
return i.id != ""
|
||||
}
|
||||
|
||||
// Stop the docker container.
|
||||
func (i *ImgGeth) Stop() error {
|
||||
if !i.running {
|
||||
return nil
|
||||
}
|
||||
i.running = false
|
||||
|
||||
ctx := context.Background()
|
||||
// check if container is running, stop the running container.
|
||||
id := GetContainerID(i.name)
|
||||
if id != "" {
|
||||
timeoutSec := 3
|
||||
timeout := container.StopOptions{
|
||||
Timeout: &timeoutSec,
|
||||
}
|
||||
if err := cli.ContainerStop(ctx, id, timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
i.id = id
|
||||
}
|
||||
// remove the stopped container.
|
||||
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
|
||||
}
|
||||
|
||||
func (i *ImgGeth) params() []string {
|
||||
cmds := []string{"run", "--rm", "--name", i.name}
|
||||
var ports []string
|
||||
if i.httpPort != 0 {
|
||||
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)
|
||||
}
|
||||
if i.wsPort != 0 {
|
||||
ports = append(ports, []string{"-p", strconv.Itoa(i.wsPort) + ":8546"}...)
|
||||
}
|
||||
|
||||
var envs []string
|
||||
if i.ipcPath != "" {
|
||||
envs = append(envs, []string{"-e", fmt.Sprintf("IPC_PATH=%s", i.ipcPath)}...)
|
||||
}
|
||||
|
||||
if i.volume != "" {
|
||||
cmds = append(cmds, []string{"-v", fmt.Sprintf("%s:%s", i.volume, i.volume)}...)
|
||||
}
|
||||
|
||||
cmds = append(cmds, ports...)
|
||||
cmds = append(cmds, envs...)
|
||||
|
||||
return append(cmds, i.image)
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
_ "github.com/lib/pq" //nolint:golint
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/docker"
|
||||
)
|
||||
|
||||
var (
|
||||
base *docker.App
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
base = docker.NewDockerApp()
|
||||
|
||||
m.Run()
|
||||
|
||||
base.Free()
|
||||
}
|
||||
|
||||
func TestDB(t *testing.T) {
|
||||
base.RunDBImage(t)
|
||||
|
||||
db, err := sqlx.Open("postgres", base.DBImg.Endpoint())
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, db.Ping())
|
||||
}
|
||||
|
||||
func TestL1Geth(t *testing.T) {
|
||||
base.RunL1Geth(t)
|
||||
|
||||
client, err := base.L1Client()
|
||||
assert.NoError(t, err)
|
||||
|
||||
chainID, err := client.ChainID(context.Background())
|
||||
assert.NoError(t, err)
|
||||
t.Logf("chainId: %s", chainID.String())
|
||||
}
|
||||
|
||||
func TestL2Geth(t *testing.T) {
|
||||
base.RunL2Geth(t)
|
||||
|
||||
client, err := base.L2Client()
|
||||
assert.NoError(t, err)
|
||||
|
||||
chainID, err := client.ChainID(context.Background())
|
||||
assert.NoError(t, err)
|
||||
t.Logf("chainId: %s", chainID.String())
|
||||
}
|
||||
@@ -15,7 +15,7 @@
     "archimedesBlock": 0,
     "shanghaiBlock": 0,
     "clique": {
-      "period": 3,
+      "period": 1,
       "epoch": 30000
     },
     "scroll": {
@@ -1,6 +1,7 @@
 package forks
 
 import (
+	"math"
 	"math/big"
 	"sort"
 
@@ -8,30 +9,48 @@ import (
 )
 
 // CollectSortedForkHeights returns a sorted set of block numbers that one or more forks are activated on
-func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]bool) {
-	forkHeightsMap := make(map[uint64]bool)
-	for _, fork := range []*big.Int{
-		config.HomesteadBlock,
-		config.DAOForkBlock,
-		config.EIP150Block,
-		config.EIP155Block,
-		config.EIP158Block,
-		config.ByzantiumBlock,
-		config.ConstantinopleBlock,
-		config.PetersburgBlock,
-		config.IstanbulBlock,
-		config.MuirGlacierBlock,
-		config.BerlinBlock,
-		config.LondonBlock,
-		config.ArrowGlacierBlock,
-		config.ArchimedesBlock,
-		config.ShanghaiBlock,
+func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]bool, map[string]uint64) {
+	type nameFork struct {
+		name  string
+		block *big.Int
+	}
+
+	forkHeightNameMap := make(map[uint64]string)
+
+	for _, fork := range []nameFork{
+		{name: "homestead", block: config.HomesteadBlock},
+		{name: "daoFork", block: config.DAOForkBlock},
+		{name: "eip150", block: config.EIP150Block},
+		{name: "eip155", block: config.EIP155Block},
+		{name: "eip158", block: config.EIP158Block},
+		{name: "byzantium", block: config.ByzantiumBlock},
+		{name: "constantinople", block: config.ConstantinopleBlock},
+		{name: "petersburg", block: config.PetersburgBlock},
+		{name: "istanbul", block: config.IstanbulBlock},
+		{name: "muirGlacier", block: config.MuirGlacierBlock},
+		{name: "berlin", block: config.BerlinBlock},
+		{name: "london", block: config.LondonBlock},
+		{name: "arrowGlacier", block: config.ArrowGlacierBlock},
+		{name: "archimedes", block: config.ArchimedesBlock},
+		{name: "shanghai", block: config.ShanghaiBlock},
+		{name: "bernoulli", block: config.BernoulliBlock},
+		{name: "curie", block: config.CurieBlock},
 	} {
-		if fork == nil {
+		if fork.block == nil {
 			continue
-		} else if height := fork.Uint64(); height != 0 {
-			forkHeightsMap[height] = true
 		}
+		height := fork.block.Uint64()
+
+		// only keep latest fork for at each height, discard the rest
+		forkHeightNameMap[height] = fork.name
 	}
+
+	forkHeightsMap := make(map[uint64]bool)
+	forkNameHeightMap := make(map[string]uint64)
+
+	for height, name := range forkHeightNameMap {
+		forkHeightsMap[height] = true
+		forkNameHeightMap[name] = height
+	}
 
 	var forkHeights []uint64
@@ -41,7 +60,7 @@ func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]
 	sort.Slice(forkHeights, func(i, j int) bool {
 		return forkHeights[i] < forkHeights[j]
 	})
-	return forkHeights, forkHeightsMap
+	return forkHeights, forkHeightsMap, forkNameHeightMap
 }
 
 // BlocksUntilFork returns the number of blocks until the next fork
@@ -54,3 +73,17 @@ func BlocksUntilFork(blockHeight uint64, forkHeights []uint64) uint64 {
 	}
 	return 0
 }
+
+// BlockRange returns the block range of the hard fork
+// Need ensure the forkHeights is incremental
+func BlockRange(currentForkHeight uint64, forkHeights []uint64) (from, to uint64) {
+	to = math.MaxInt64
+	for _, height := range forkHeights {
+		if currentForkHeight < height {
+			to = height
+			return
+		}
+		from = height
+	}
+	return
+}
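Taken together, the new third return value and the BlockRange helper let callers look up a fork by name and then ask which block span that fork governs. A small usage sketch follows; the chain-config values and the `scroll-tech/common/forks` import path are illustrative assumptions, not taken from this diff.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/params"

	"scroll-tech/common/forks"
)

func main() {
	// Hypothetical chain config; the heights below are for illustration only.
	config := &params.ChainConfig{
		ArchimedesBlock: big.NewInt(0),
		ShanghaiBlock:   big.NewInt(0),
		BernoulliBlock:  big.NewInt(100),
	}

	heights, heightSet, nameToHeight := forks.CollectSortedForkHeights(config)
	fmt.Println(heights)                   // [0 100]
	fmt.Println(heightSet[100])            // true
	fmt.Println(nameToHeight["bernoulli"]) // 100

	// BlockRange reports the span governed by the fork that activates at the
	// given height; the last fork extends to math.MaxInt64 (open-ended).
	from, to := forks.BlockRange(nameToHeight["bernoulli"], heights)
	fmt.Println(from, to) // 100 9223372036854775807
}
```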
@@ -1,6 +1,7 @@
 package forks
 
 import (
+	"math"
 	"math/big"
 	"testing"
 
@@ -9,20 +10,27 @@ import (
 )
 
 func TestCollectSortedForkBlocks(t *testing.T) {
-	l, m := CollectSortedForkHeights(&params.ChainConfig{
-		EIP155Block:         big.NewInt(4),
-		EIP158Block:         big.NewInt(3),
-		ByzantiumBlock:      big.NewInt(3),
-		ConstantinopleBlock: big.NewInt(0),
+	l, m, n := CollectSortedForkHeights(&params.ChainConfig{
+		ArchimedesBlock: big.NewInt(0),
+		ShanghaiBlock:   big.NewInt(3),
+		BernoulliBlock:  big.NewInt(3),
+		CurieBlock:      big.NewInt(4),
 	})
 	require.Equal(t, l, []uint64{
+		0,
 		3,
 		4,
 	})
 	require.Equal(t, map[uint64]bool{
 		3: true,
 		4: true,
+		0: true,
 	}, m)
+	require.Equal(t, map[string]uint64{
+		"archimedes": 0,
+		"bernoulli":  3,
+		"curie":      4,
+	}, n)
 }
 
 func TestBlocksUntilFork(t *testing.T) {
@@ -64,3 +72,71 @@ func TestBlocksUntilFork(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockRange(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
forkHeight uint64
|
||||
forkHeights []uint64
|
||||
expectedFrom uint64
|
||||
expectedTo uint64
|
||||
}{
|
||||
{
|
||||
name: "ToInfinite",
|
||||
forkHeight: 300,
|
||||
forkHeights: []uint64{100, 200, 300},
|
||||
expectedFrom: 300,
|
||||
expectedTo: math.MaxInt64,
|
||||
},
|
||||
{
|
||||
name: "To300",
|
||||
forkHeight: 200,
|
||||
forkHeights: []uint64{100, 200, 300},
|
||||
expectedFrom: 200,
|
||||
expectedTo: 300,
|
||||
},
|
||||
{
|
||||
name: "To200",
|
||||
forkHeight: 100,
|
||||
forkHeights: []uint64{100, 200, 300},
|
||||
expectedFrom: 100,
|
||||
expectedTo: 200,
|
||||
},
|
||||
{
|
||||
name: "To100",
|
||||
forkHeight: 0,
|
||||
forkHeights: []uint64{100, 200, 300},
|
||||
expectedFrom: 0,
|
||||
expectedTo: 100,
|
||||
},
|
||||
{
|
||||
name: "To200-1",
|
||||
forkHeight: 100,
|
||||
forkHeights: []uint64{100, 200},
|
||||
expectedFrom: 100,
|
||||
expectedTo: 200,
|
||||
},
|
||||
{
|
||||
name: "To2",
|
||||
forkHeight: 1,
|
||||
forkHeights: []uint64{1, 2},
|
||||
expectedFrom: 1,
|
||||
expectedTo: 2,
|
||||
},
|
||||
{
|
||||
name: "ToInfinite-1",
|
||||
forkHeight: 0,
|
||||
forkHeights: []uint64{0},
|
||||
expectedFrom: 0,
|
||||
expectedTo: math.MaxInt64,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
from, to := BlockRange(test.forkHeight, test.forkHeights)
|
||||
require.Equal(t, test.expectedFrom, from)
|
||||
require.Equal(t, test.expectedTo, to)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,17 +9,16 @@ require (
|
||||
github.com/docker/docker v25.0.3+incompatible
|
||||
github.com/gin-contrib/pprof v1.4.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/mattn/go-colorable v0.1.13
|
||||
github.com/mattn/go-isatty v0.0.20
|
||||
github.com/modern-go/reflect2 v1.0.2
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/prometheus/client_golang v1.16.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/testcontainers/testcontainers-go v0.29.1
|
||||
github.com/testcontainers/testcontainers-go/modules/compose v0.29.1
|
||||
github.com/testcontainers/testcontainers-go v0.28.0
|
||||
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0
|
||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
gorm.io/driver/postgres v1.5.0
|
||||
gorm.io/gorm v1.25.5
|
||||
@@ -127,7 +126,7 @@ require (
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
|
||||
github.com/jackc/pgx/v5 v5.5.0 // indirect
|
||||
github.com/jackc/pgx/v5 v5.5.4 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.1 // indirect
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
@@ -144,7 +143,6 @@ require (
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/mattn/go-shellwords v1.0.12 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.16 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
|
||||
@@ -268,7 +268,6 @@ github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXS
|
||||
github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24=
|
||||
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
|
||||
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
|
||||
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
@@ -382,8 +381,8 @@ github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5ey
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
|
||||
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
|
||||
github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
|
||||
github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
|
||||
github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
|
||||
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
|
||||
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
@@ -400,8 +399,6 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
|
||||
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
|
||||
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
@@ -443,7 +440,6 @@ github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ic
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
@@ -469,9 +465,6 @@ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
|
||||
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
|
||||
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||
github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
@@ -614,8 +607,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935 h1:bHBt6sillaT4o/9RjxkVX8pWwvEmu37uWBw4XbCjfzY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
|
||||
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
|
||||
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
|
||||
@@ -678,10 +671,12 @@ github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZ
|
||||
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/testcontainers/testcontainers-go v0.29.1 h1:z8kxdFlovA2y97RWx98v/TQ+tR+SXZm6p35M+xB92zk=
|
||||
github.com/testcontainers/testcontainers-go v0.29.1/go.mod h1:SnKnKQav8UcgtKqjp/AD8bE1MqZm+3TDb/B8crE3XnI=
|
||||
github.com/testcontainers/testcontainers-go/modules/compose v0.29.1 h1:47ipPM+s+ltCDOP3Sa1j95AkNb+z+WGiHLDbLU8ixuc=
|
||||
github.com/testcontainers/testcontainers-go/modules/compose v0.29.1/go.mod h1:Sqh+Ef2ESdbJQjTJl57UOkEHkOc7gXvQLg1b5xh6f1Y=
|
||||
github.com/testcontainers/testcontainers-go v0.28.0 h1:1HLm9qm+J5VikzFDYhOd+Zw12NtOl+8drH2E8nTY1r8=
|
||||
github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
|
||||
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0 h1:QOCeTYZIYixg796Ik60MOaeMgpAKPbQd5pJOdTrftyg=
|
||||
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
|
||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0 h1:ff0s4JdYIdNAVSi/SrpN2Pdt1f+IjIw3AKjbHau8Un4=
|
||||
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
|
||||
github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
|
||||
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
|
||||
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=
|
||||
|
||||
common/libzkp/impl/Cargo.lock (generated, 1054 lines changed; diff suppressed because it is too large)
@@ -8,26 +8,30 @@ edition = "2021"
 crate-type = ["cdylib"]
 
 [patch.crates-io]
 gobuild = { git = "https://github.com/scroll-tech/gobuild.git" }
 halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
 ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
 ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
 ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
 #ethers-etherscan = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
 #ethers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
 [patch."https://github.com/privacy-scaling-explorations/halo2.git"]
-halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
+halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
 [patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
-poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "scroll-dev-0220" }
-[patch."https://github.com/privacy-scaling-explorations/halo2wrong.git"]
-halo2wrong = { git = "https://github.com/scroll-tech/halo2wrong.git", branch = "halo2-ecc-snark-verifier-0323" }
-maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-ecc-snark-verifier-0323" }
-[patch."https://github.com/privacy-scaling-explorations/halo2curves.git"]
-halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
+poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
+[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
+bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
 
 [dependencies]
-halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
-prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.9", default-features = false, features = ["parallel_syn", "scroll", "shanghai", "strict-ccc"] }
+halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
+snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
+prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
 
 base64 = "0.13.0"
 env_logger = "0.9.0"
 libc = "0.2"
 log = "0.4"
-once_cell = "1.8.0"
+once_cell = "1.19"
 serde = "1.0"
 serde_derive = "1.0"
 serde_json = "1.0.66"
@@ -1 +1 @@
-nightly-2022-12-10
+nightly-2023-12-03
@@ -12,6 +12,7 @@ use prover::{
     utils::{chunk_trace_to_witness_block, init_env_and_log},
     BatchProof, BlockTrace, ChunkHash, ChunkProof,
 };
+use snark_verifier_sdk::verify_evm_calldata;
 use std::{cell::OnceCell, env, ptr::null};
 
 static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -119,7 +120,7 @@ pub unsafe extern "C" fn gen_batch_proof(
 
     let chunk_hashes_proofs = chunk_hashes
         .into_iter()
-        .zip(chunk_proofs.into_iter())
+        .zip(chunk_proofs)
         .collect();
 
     let proof = PROVER
@@ -148,11 +149,33 @@
 
 /// # Safety
 #[no_mangle]
-pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char {
+pub unsafe extern "C" fn verify_batch_proof(
+    proof: *const c_char,
+    fork_name: *const c_char,
+) -> c_char {
     let proof = c_char_to_vec(proof);
     let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
 
-    let verified = panic_catch(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
+    let fork_name_str = c_char_to_str(fork_name);
+    let fork_id = match fork_name_str {
+        "" => 0,
+        "shanghai" => 0,
+        "bernoulli" => 1,
+        _ => {
+            log::warn!("unexpected fork_name {fork_name_str}, treated as bernoulli");
+            1
+        }
+    };
+    let verified = panic_catch(|| {
+        if fork_id == 0 {
+            // before upgrade#2(EIP4844)
+            verify_evm_calldata(
+                include_bytes!("evm_verifier_fork_1.bin").to_vec(),
+                proof.calldata(),
+            )
+        } else {
+            VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
+        }
+    });
     verified.unwrap_or(false) as c_char
 }
common/libzkp/impl/src/evm_verifier_fork_1.bin (new binary file, not shown)
@@ -1,5 +1,3 @@
-#![feature(once_cell)]
-
 mod batch;
 mod chunk;
 mod types;
@@ -3,7 +3,7 @@ void init_batch_verifier(char* params_dir, char* assets_dir);
 char* get_batch_vk();
 char* check_chunk_proofs(char* chunk_proofs);
 char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
-char verify_batch_proof(char* proof);
+char verify_batch_proof(char* proof, char* fork_name);
 
 void init_chunk_prover(char* params_dir, char* assets_dir);
 void init_chunk_verifier(char* params_dir, char* assets_dir);
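The header change above means any Go code calling into libzkp must now pass a fork name alongside the proof. A hedged cgo sketch of such a call site follows; the wrapper name, package, and linker flags are illustrative, and only the `verify_batch_proof(char*, char*)` signature comes from the header.

```go
package libzkp

/*
#cgo LDFLAGS: -lzkp -lm -ldl
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"

import "unsafe"

// VerifyBatchProof is an illustrative wrapper around the updated C API: the
// second argument carries the hard-fork name ("shanghai", "bernoulli", ...)
// so the verifier can pick the matching EVM verifier (see batch.rs above).
func VerifyBatchProof(proofJSON []byte, forkName string) bool {
	cProof := C.CString(string(proofJSON))
	defer C.free(unsafe.Pointer(cProof))

	cFork := C.CString(forkName)
	defer C.free(unsafe.Pointer(cFork))

	// verify_batch_proof returns a non-zero char when verification succeeds.
	return C.verify_batch_proof(cProof, cFork) != 0
}
```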
common/testcontainers/testcontainers.go (new file, 200 lines; listing truncated below)

@@ -0,0 +1,200 @@
package testcontainers

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
	"gorm.io/gorm"

	"scroll-tech/common/database"
)

// TestcontainerApps testcontainers struct
type TestcontainerApps struct {
	postgresContainer *postgres.PostgresContainer
	l1GethContainer   *testcontainers.DockerContainer
	l2GethContainer   *testcontainers.DockerContainer

	// common time stamp in nanoseconds.
	Timestamp int
}

// NewTestcontainerApps returns new instance of TestcontainerApps struct
func NewTestcontainerApps() *TestcontainerApps {
	timestamp := time.Now().Nanosecond()
	return &TestcontainerApps{
		Timestamp: timestamp,
	}
}

// StartPostgresContainer starts a postgres container
func (t *TestcontainerApps) StartPostgresContainer() error {
	if t.postgresContainer != nil && t.postgresContainer.IsRunning() {
		return nil
	}
	postgresContainer, err := postgres.RunContainer(context.Background(),
		testcontainers.WithImage("postgres"),
		postgres.WithDatabase("test_db"),
		postgres.WithPassword("123456"),
		testcontainers.WithWaitStrategy(
			wait.ForLog("database system is ready to accept connections").WithOccurrence(2).WithStartupTimeout(5*time.Second)),
	)
	if err != nil {
		log.Printf("failed to start postgres container: %s", err)
		return err
	}
	t.postgresContainer = postgresContainer
	return nil
}

// StartL1GethContainer starts a L1Geth container
func (t *TestcontainerApps) StartL1GethContainer() error {
	if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
		return nil
	}
	req := testcontainers.ContainerRequest{
		Image:        "scroll_l1geth",
		ExposedPorts: []string{"8546/tcp", "8545/tcp"},
		WaitingFor: wait.ForAll(
			wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
			wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
		),
		Cmd: []string{"--log.debug", "ANY"},
	}
	genericContainerReq := testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	}
	container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
	if err != nil {
		log.Printf("failed to start scroll_l1geth container: %s", err)
		return err
	}
	t.l1GethContainer, _ = container.(*testcontainers.DockerContainer)
	return nil
}

// StartL2GethContainer starts a L2Geth container
func (t *TestcontainerApps) StartL2GethContainer() error {
	if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
		return nil
	}
	req := testcontainers.ContainerRequest{
		Image:        "scroll_l2geth",
		ExposedPorts: []string{"8546/tcp", "8545/tcp"},
		WaitingFor: wait.ForAll(
			wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
			wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
		),
	}
	genericContainerReq := testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	}
	container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
	if err != nil {
		log.Printf("failed to start scroll_l2geth container: %s", err)
		return err
	}
	t.l2GethContainer, _ = container.(*testcontainers.DockerContainer)
	return nil
}

// GetDBEndPoint returns the endpoint of the running postgres container
|
||||
func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
|
||||
if t.postgresContainer == nil || !t.postgresContainer.IsRunning() {
|
||||
return "", fmt.Errorf("postgres is not running")
|
||||
}
|
||||
return t.postgresContainer.ConnectionString(context.Background(), "sslmode=disable")
|
||||
}
|
||||
|
||||
// GetL1GethEndPoint returns the endpoint of the running L1Geth container
|
||||
func (t *TestcontainerApps) GetL1GethEndPoint() (string, error) {
|
||||
if t.l1GethContainer == nil || !t.l1GethContainer.IsRunning() {
|
||||
return "", fmt.Errorf("l1 geth is not running")
|
||||
}
|
||||
endpoint, err := t.l1GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
// GetL2GethEndPoint returns the endpoint of the running L2Geth container
|
||||
func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
|
||||
if t.l2GethContainer == nil || !t.l2GethContainer.IsRunning() {
|
||||
return "", fmt.Errorf("l2 geth is not running")
|
||||
}
|
||||
endpoint, err := t.l2GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return endpoint, nil
|
||||
}
|
||||
|
||||
// GetGormDBClient returns a gorm.DB by connecting to the running postgres container
|
||||
func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
|
||||
endpoint, err := t.GetDBEndPoint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dbCfg := &database.Config{
|
||||
DSN: endpoint,
|
||||
DriverName: "postgres",
|
||||
MaxOpenNum: 200,
|
||||
MaxIdleNum: 20,
|
||||
}
|
||||
return database.InitDB(dbCfg)
|
||||
}
|
||||
|
||||
// GetL1GethClient returns a ethclient by dialing running L1Geth
|
||||
func (t *TestcontainerApps) GetL1GethClient() (*ethclient.Client, error) {
|
||||
endpoint, err := t.GetL1GethEndPoint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := ethclient.Dial(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// GetL2GethClient returns a ethclient by dialing running L2Geth
|
||||
func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
|
||||
endpoint, err := t.GetL2GethEndPoint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := ethclient.Dial(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// Free stops all running containers
|
||||
func (t *TestcontainerApps) Free() {
|
||||
ctx := context.Background()
|
||||
if t.postgresContainer != nil && t.postgresContainer.IsRunning() {
|
||||
if err := t.postgresContainer.Terminate(ctx); err != nil {
|
||||
log.Printf("failed to stop postgres container: %s", err)
|
||||
}
|
||||
}
|
||||
if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
|
||||
if err := t.l1GethContainer.Terminate(ctx); err != nil {
|
||||
log.Printf("failed to stop scroll_l1geth container: %s", err)
|
||||
}
|
||||
}
|
||||
if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
|
||||
if err := t.l2GethContainer.Terminate(ctx); err != nil {
|
||||
log.Printf("failed to stop scroll_l2geth container: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
59
common/testcontainers/testcontainers_test.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package testcontainers
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// TestNewTestcontainerApps tests NewTestcontainerApps
|
||||
func TestNewTestcontainerApps(t *testing.T) {
|
||||
var (
|
||||
err error
|
||||
endpoint string
|
||||
gormDBclient *gorm.DB
|
||||
ethclient *ethclient.Client
|
||||
)
|
||||
|
||||
// test start testcontainers
|
||||
testApps := NewTestcontainerApps()
|
||||
assert.NoError(t, testApps.StartPostgresContainer())
|
||||
endpoint, err = testApps.GetDBEndPoint()
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, endpoint)
|
||||
gormDBclient, err = testApps.GetGormDBClient()
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, gormDBclient)
|
||||
|
||||
assert.NoError(t, testApps.StartL1GethContainer())
|
||||
endpoint, err = testApps.GetL1GethEndPoint()
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, endpoint)
|
||||
ethclient, err = testApps.GetL1GethClient()
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, ethclient)
|
||||
|
||||
assert.NoError(t, testApps.StartL2GethContainer())
|
||||
endpoint, err = testApps.GetL2GethEndPoint()
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, endpoint)
|
||||
ethclient, err = testApps.GetL2GethClient()
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, ethclient)
|
||||
|
||||
// test free testcontainers
|
||||
testApps.Free()
|
||||
endpoint, err = testApps.GetDBEndPoint()
|
||||
assert.EqualError(t, err, "postgres is not running")
|
||||
assert.Empty(t, endpoint)
|
||||
|
||||
endpoint, err = testApps.GetL1GethEndPoint()
|
||||
assert.EqualError(t, err, "l1 geth is not running")
|
||||
assert.Empty(t, endpoint)
|
||||
|
||||
endpoint, err = testApps.GetL2GethEndPoint()
|
||||
assert.EqualError(t, err, "l2 geth is not running")
|
||||
assert.Empty(t, endpoint)
|
||||
}
|
||||
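The test above exercises the new `TestcontainerApps` helper end to end. In a consuming package the same helper would typically be wired into `TestMain`, so the shared containers start once and are released on exit; a hypothetical sketch (the package name and the set of started containers are assumptions, not taken from this change):

```go
package rollup_test

import (
	"os"
	"testing"

	"scroll-tech/common/testcontainers"
)

var testApps *testcontainers.TestcontainerApps

// TestMain starts the shared containers once for the whole package and
// frees them after all tests have run.
func TestMain(m *testing.M) {
	testApps = testcontainers.NewTestcontainerApps()
	if err := testApps.StartPostgresContainer(); err != nil {
		panic(err)
	}
	if err := testApps.StartL2GethContainer(); err != nil {
		panic(err)
	}

	code := m.Run()
	testApps.Free()
	os.Exit(code)
}
```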
@@ -442,9 +442,9 @@ func EstimateBatchL1CommitGas(b *encoding.Batch) (uint64, error) {
|
||||
}
|
||||
|
||||
// EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately.
|
||||
func EstimateBatchL1CommitCalldataSize(c *encoding.Batch) (uint64, error) {
|
||||
func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) (uint64, error) {
|
||||
var totalL1CommitCalldataSize uint64
|
||||
for _, chunk := range c.Chunks {
|
||||
for _, chunk := range b.Chunks {
|
||||
chunkL1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
||||
@@ -210,6 +210,10 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
|
||||
return nil, fmt.Errorf("too many chunks in batch")
|
||||
}
|
||||
|
||||
if len(batch.Chunks) == 0 {
|
||||
return nil, fmt.Errorf("too few chunks in batch")
|
||||
}
|
||||
|
||||
// batch data hash
|
||||
dataHash, err := computeBatchDataHash(batch.Chunks, batch.TotalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
@@ -223,18 +227,11 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
|
||||
}
|
||||
|
||||
// blob payload
|
||||
blob, z, err := constructBlobPayload(batch.Chunks)
|
||||
blob, blobVersionedHash, z, err := constructBlobPayload(batch.Chunks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// blob versioned hash
|
||||
c, err := kzg4844.BlobToCommitment(*blob)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create blob commitment")
|
||||
}
|
||||
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
|
||||
|
||||
daBatch := DABatch{
|
||||
Version: CodecV1Version,
|
||||
BatchIndex: batch.Index,
|
||||
@@ -277,59 +274,58 @@ func computeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore u
|
||||
}
|
||||
|
||||
// constructBlobPayload constructs the 4844 blob payload.
|
||||
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) {
|
||||
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
|
||||
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
|
||||
metadataLength := 2 + MaxNumChunks*4
|
||||
|
||||
// the raw (un-padded) blob payload
|
||||
blobBytes := make([]byte, metadataLength)
|
||||
|
||||
// the number of chunks that contain at least one L2 transaction
|
||||
numNonEmptyChunks := 0
|
||||
|
||||
// challenge digest preimage
|
||||
// 1 hash for metadata and 1 for each chunk
|
||||
challengePreimage := make([]byte, (1+MaxNumChunks)*32)
|
||||
// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
|
||||
challengePreimage := make([]byte, (1+MaxNumChunks+1)*32)
|
||||
|
||||
// the challenge point z
|
||||
var z kzg4844.Point
|
||||
// the chunk data hash used for calculating the challenge preimage
|
||||
var chunkDataHash common.Hash
|
||||
|
||||
// blob metadata: num_chunks
|
||||
binary.BigEndian.PutUint16(blobBytes[0:], uint16(len(chunks)))
|
||||
|
||||
// encode blob metadata and L2 transactions,
|
||||
// and simultaneously also build challenge preimage
|
||||
for chunkID, chunk := range chunks {
|
||||
currentChunkStartIndex := len(blobBytes)
|
||||
hasL2Tx := false
|
||||
|
||||
for _, block := range chunk.Blocks {
|
||||
for _, tx := range block.Transactions {
|
||||
if tx.Type != types.L1MessageTxType {
|
||||
hasL2Tx = true
|
||||
// encode L2 txs into blob payload
|
||||
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, common.Hash{}, nil, err
|
||||
}
|
||||
blobBytes = append(blobBytes, rlpTxData...)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// blob metadata: chunki_size
|
||||
chunkSize := len(blobBytes) - currentChunkStartIndex
|
||||
binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(chunkSize))
|
||||
|
||||
if hasL2Tx {
|
||||
numNonEmptyChunks++
|
||||
if chunkSize := len(blobBytes) - currentChunkStartIndex; chunkSize != 0 {
|
||||
binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(chunkSize))
|
||||
}
|
||||
|
||||
// challenge: compute chunk data hash
|
||||
hash := crypto.Keccak256Hash(blobBytes[currentChunkStartIndex:])
|
||||
copy(challengePreimage[32+chunkID*32:], hash[:])
|
||||
chunkDataHash = crypto.Keccak256Hash(blobBytes[currentChunkStartIndex:])
|
||||
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
|
||||
}
|
||||
|
||||
// blob metadata: num_chunks
|
||||
binary.BigEndian.PutUint16(blobBytes[0:], uint16(numNonEmptyChunks))
|
||||
// if we have fewer than MaxNumChunks chunks, the rest
|
||||
// of the blob metadata is correctly initialized to 0,
|
||||
// but we need to add padding to the challenge preimage
|
||||
for chunkID := len(chunks); chunkID < MaxNumChunks; chunkID++ {
|
||||
// use the last chunk's data hash as padding
|
||||
copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
|
||||
}
|
||||
|
||||
// challenge: compute metadata hash
|
||||
hash := crypto.Keccak256Hash(blobBytes[0:metadataLength])
|
||||
@@ -338,15 +334,30 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
|
||||
// convert raw data to BLSFieldElements
|
||||
blob, err := makeBlobCanonical(blobBytes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
// compute z = challenge_digest % BLS_MODULUS
|
||||
challengeDigest := crypto.Keccak256Hash(challengePreimage[:])
|
||||
point := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
|
||||
copy(z[:], point.Bytes()[0:32])
|
||||
// compute blob versioned hash
|
||||
c, err := kzg4844.BlobToCommitment(*blob)
|
||||
if err != nil {
|
||||
return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
|
||||
}
|
||||
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
|
||||
|
||||
return blob, &z, nil
|
||||
// challenge: append blob versioned hash
|
||||
copy(challengePreimage[(1+MaxNumChunks)*32:], blobVersionedHash[:])
|
||||
|
||||
// compute z = challenge_digest % BLS_MODULUS
|
||||
challengeDigest := crypto.Keccak256Hash(challengePreimage)
|
||||
pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
|
||||
pointBytes := pointBigInt.Bytes()
|
||||
|
||||
// the challenge point z
|
||||
var z kzg4844.Point
|
||||
start := 32 - len(pointBytes)
|
||||
copy(z[start:], pointBytes)
|
||||
|
||||
return blob, blobVersionedHash, &z, nil
|
||||
}
|
||||
|
||||
// makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
|
||||
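One subtle fix in the hunk above is the encoding of the challenge point `z`: `big.Int.Bytes()` strips leading zero bytes, so the old `copy(z[:], point.Bytes()[0:32])` could slice out of range or misalign the value whenever the reduced point is shorter than 32 bytes, whereas the new code right-aligns the bytes into the fixed-size array. A standalone illustration of the corrected encoding:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// An example value whose byte representation is shorter than 32 bytes.
	point := new(big.Int).SetUint64(0xdeadbeef)

	var z [32]byte
	pointBytes := point.Bytes() // 4 bytes here; leading zeros are stripped
	// Right-align (big-endian, left-padded with zeros) into the 32-byte array,
	// as the updated constructBlobPayload does.
	copy(z[32-len(pointBytes):], pointBytes)

	fmt.Printf("%x\n", z) // 00...00deadbeef
}
```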
@@ -443,8 +454,55 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
|
||||
return BlobDataProofArgs.Pack(values...)
|
||||
}
|
||||
|
||||
// Blob returns the blob of the batch.
|
||||
func (b *DABatch) Blob() *kzg4844.Blob {
|
||||
return b.blob
|
||||
}
|
||||
|
||||
// DecodeFromCalldata attempts to decode a DABatch and an array of DAChunks from the provided calldata byte slice.
|
||||
func DecodeFromCalldata(data []byte) (*DABatch, []*DAChunk, error) {
|
||||
// TODO: implement this function.
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
// EstimateChunkL1CommitBlobSize estimates the size of the L1 commit blob for a single chunk.
|
||||
func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
|
||||
metadataSize := uint64(2 + 4*MaxNumChunks) // over-estimate: adding metadata length
|
||||
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
paddedSize := ((metadataSize + chunkDataSize + 30) / 31) * 32
|
||||
return paddedSize, nil
|
||||
}
|
||||
|
||||
// EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
|
||||
func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
|
||||
metadataSize := uint64(2 + 4*MaxNumChunks)
|
||||
var batchDataSize uint64
|
||||
for _, c := range b.Chunks {
|
||||
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
batchDataSize += chunkDataSize
|
||||
}
|
||||
paddedSize := ((metadataSize + batchDataSize + 30) / 31) * 32
|
||||
return paddedSize, nil
|
||||
}
|
||||
|
||||
func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
|
||||
var dataSize uint64
|
||||
for _, block := range c.Blocks {
|
||||
for _, tx := range block.Transactions {
|
||||
if tx.Type != types.L1MessageTxType {
|
||||
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
dataSize += uint64(len(rlpTxData))
|
||||
}
|
||||
}
|
||||
}
|
||||
return dataSize, nil
|
||||
}
|
||||
|
||||
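The two estimation helpers above share one packing rule: metadata plus RLP-encoded L2 transaction bytes are packed 31 bytes per BLS field element, and each field element occupies 32 bytes of the blob, hence `((metadataSize + dataSize + 30) / 31) * 32`. A small worked sketch of that arithmetic (the `maxNumChunks` value here is illustrative; the real constant lives in the codec package):

```go
package main

import "fmt"

const maxNumChunks = 15 // illustrative; see MaxNumChunks in the codec package

// estimateBlobSize applies the padding formula from the hunk above.
func estimateBlobSize(dataSize uint64) uint64 {
	metadataSize := uint64(2 + 4*maxNumChunks) // 62 bytes with 15 chunks
	return ((metadataSize + dataSize + 30) / 31) * 32
}

func main() {
	// 100,000 bytes of transaction data:
	// (62 + 100000 + 30) / 31 = 3228 field elements -> 3228 * 32 = 103296 bytes.
	fmt.Println(estimateBlobSize(100000))
}
```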
File diff suppressed because one or more lines are too long
@@ -6,8 +6,30 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
)
|
||||
|
||||
// CodecVersion defines the version of encoder and decoder.
|
||||
type CodecVersion int
|
||||
|
||||
const (
|
||||
// CodecV0 represents the version 0 of the encoder and decoder.
|
||||
CodecV0 CodecVersion = iota
|
||||
|
||||
// CodecV1 represents the version 1 of the encoder and decoder.
|
||||
CodecV1
|
||||
|
||||
// txTypeTest is a special transaction type used in unit tests.
|
||||
txTypeTest = 0xff
|
||||
)
|
||||
|
||||
func init() {
|
||||
// make sure txTypeTest will not interfere with other transaction types
|
||||
if txTypeTest == types.LegacyTxType || txTypeTest == types.AccessListTxType || txTypeTest == types.DynamicFeeTxType || txTypeTest == types.BlobTxType || txTypeTest == types.L1MessageTxType {
|
||||
log.Crit("txTypeTest is overlapping with existing transaction types")
|
||||
}
|
||||
}
|
||||
|
||||
// Block represents an L2 block.
|
||||
type Block struct {
|
||||
Header *types.Header
|
||||
@@ -123,6 +145,10 @@ func ConvertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
|
||||
S: txData.S.ToInt(),
|
||||
})
|
||||
|
||||
case txTypeTest:
|
||||
// in the tests, we simply use `data` as the RLP-encoded transaction
|
||||
return data, nil
|
||||
|
||||
case types.L1MessageTxType: // L1MessageTxType is not supported
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported tx type: %d", txData.Type)
|
||||
@@ -209,8 +235,3 @@ func (b *Batch) WithdrawRoot() common.Hash {
|
||||
lastChunkBlockNum := len(b.Chunks[numChunks-1].Blocks)
|
||||
return b.Chunks[len(b.Chunks)-1].Blocks[lastChunkBlockNum-1].WithdrawRoot
|
||||
}
|
||||
|
||||
// NumChunks gets the number of chunks of the batch.
|
||||
func (b *Batch) NumChunks() uint64 {
|
||||
return uint64(len(b.Chunks))
|
||||
}
|
||||
|
||||
@@ -75,7 +75,6 @@ func TestUtilFunctions(t *testing.T) {
|
||||
assert.Equal(t, uint64(240000), chunk3.L2GasUsed())
|
||||
|
||||
// Test Batch methods
|
||||
assert.Equal(t, uint64(3), batch.NumChunks())
|
||||
assert.Equal(t, block6.Header.Root, batch.StateRoot())
|
||||
assert.Equal(t, block6.WithdrawRoot, batch.WithdrawRoot())
|
||||
}
|
||||
|
||||
91
common/types/message/auth_msg.go
Normal file
@@ -0,0 +1,91 @@
|
||||
package message
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
|
||||
// It effectively acts as a registration, and makes the Prover identification
|
||||
// known to the Sequencer.
|
||||
type AuthMsg struct {
|
||||
// Message fields
|
||||
Identity *Identity `json:"message"`
|
||||
// Prover signature
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
// Identity contains all the fields to be signed by the prover.
|
||||
type Identity struct {
|
||||
// ProverName the prover name
|
||||
ProverName string `json:"prover_name"`
|
||||
// ProverVersion the prover version
|
||||
ProverVersion string `json:"prover_version"`
|
||||
// Challenge unique challenge generated by manager
|
||||
Challenge string `json:"challenge"`
|
||||
// HardForkName the hard fork name
|
||||
HardForkName string `json:"hard_fork_name"`
|
||||
}
|
||||
|
||||
// SignWithKey auth message with private key and set public key in auth message's Identity
|
||||
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
|
||||
// Hash identity content
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sign register message
|
||||
sig, err := crypto.Sign(hash, priv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.Signature = hexutil.Encode(sig)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify verifies the message of auth.
|
||||
func (a *AuthMsg) Verify() (bool, error) {
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
sig := common.FromHex(a.Signature)
|
||||
|
||||
pk, err := crypto.SigToPub(hash, sig)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
|
||||
}
|
||||
|
||||
// PublicKey return public key from signature
|
||||
func (a *AuthMsg) PublicKey() (string, error) {
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
sig := common.FromHex(a.Signature)
|
||||
// recover public key
|
||||
pk, err := crypto.SigToPub(hash, sig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
|
||||
}
|
||||
|
||||
// Hash returns the hash of the auth message, which should be the message used
|
||||
// to construct the Signature.
|
||||
func (i *Identity) Hash() ([]byte, error) {
|
||||
byt, err := rlp.EncodeToBytes(i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := crypto.Keccak256Hash(byt)
|
||||
return hash[:], nil
|
||||
}
|
||||
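The new `auth_msg.go` above adds `HardForkName` to the identity the prover signs. A minimal sign-and-verify round trip using this type (the field values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"

	"scroll-tech/common/types/message"
)

func main() {
	priv, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}

	authMsg := message.AuthMsg{
		Identity: &message.Identity{
			ProverName:    "example-prover",
			ProverVersion: "v4.3.95",
			Challenge:     "random-challenge-token",
			HardForkName:  "bernoulli",
		},
	}

	// Sign the RLP-encoded identity hash, then verify and recover the key.
	if err := authMsg.SignWithKey(priv); err != nil {
		panic(err)
	}
	ok, err := authMsg.Verify()
	fmt.Println("verified:", ok, err)

	pk, _ := authMsg.PublicKey()
	fmt.Println("prover public key:", pk)
}
```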
89
common/types/message/legacy_auth_msg.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package message
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer.
|
||||
// It effectively acts as a registration, and makes the Prover identification
|
||||
// known to the Sequencer.
|
||||
type LegacyAuthMsg struct {
|
||||
// Message fields
|
||||
Identity *LegacyIdentity `json:"message"`
|
||||
// Prover signature
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
// LegacyIdentity contains all the fields to be signed by the prover.
|
||||
type LegacyIdentity struct {
|
||||
// ProverName the prover name
|
||||
ProverName string `json:"prover_name"`
|
||||
// ProverVersion the prover version
|
||||
ProverVersion string `json:"prover_version"`
|
||||
// Challenge unique challenge generated by manager
|
||||
Challenge string `json:"challenge"`
|
||||
}
|
||||
|
||||
// SignWithKey auth message with private key and set public key in auth message's Identity
|
||||
func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
|
||||
// Hash identity content
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sign register message
|
||||
sig, err := crypto.Sign(hash, priv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.Signature = hexutil.Encode(sig)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify verifies the message of auth.
|
||||
func (a *LegacyAuthMsg) Verify() (bool, error) {
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
sig := common.FromHex(a.Signature)
|
||||
|
||||
pk, err := crypto.SigToPub(hash, sig)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
|
||||
}
|
||||
|
||||
// PublicKey return public key from signature
|
||||
func (a *LegacyAuthMsg) PublicKey() (string, error) {
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
sig := common.FromHex(a.Signature)
|
||||
// recover public key
|
||||
pk, err := crypto.SigToPub(hash, sig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
|
||||
}
|
||||
|
||||
// Hash returns the hash of the auth message, which should be the message used
|
||||
// to construct the Signature.
|
||||
func (i *LegacyIdentity) Hash() ([]byte, error) {
|
||||
byt, err := rlp.EncodeToBytes(i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := crypto.Keccak256Hash(byt)
|
||||
return hash[:], nil
|
||||
}
|
||||
@@ -58,26 +58,6 @@ const (
|
||||
ProofTypeBatch
|
||||
)
|
||||
|
||||
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
|
||||
// It effectively acts as a registration, and makes the Prover identification
|
||||
// known to the Sequencer.
|
||||
type AuthMsg struct {
|
||||
// Message fields
|
||||
Identity *Identity `json:"message"`
|
||||
// Prover signature
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
// Identity contains all the fields to be signed by the prover.
|
||||
type Identity struct {
|
||||
// ProverName the prover name
|
||||
ProverName string `json:"prover_name"`
|
||||
// ProverVersion the prover version
|
||||
ProverVersion string `json:"prover_version"`
|
||||
// Challenge unique challenge generated by manager
|
||||
Challenge string `json:"challenge"`
|
||||
}
|
||||
|
||||
// GenerateToken generates token
|
||||
func GenerateToken() (string, error) {
|
||||
b := make([]byte, 16)
|
||||
@@ -87,65 +67,6 @@ func GenerateToken() (string, error) {
|
||||
return hex.EncodeToString(b), nil
|
||||
}
|
||||
|
||||
// SignWithKey auth message with private key and set public key in auth message's Identity
|
||||
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
|
||||
// Hash identity content
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sign register message
|
||||
sig, err := crypto.Sign(hash, priv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
a.Signature = hexutil.Encode(sig)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify verifies the message of auth.
|
||||
func (a *AuthMsg) Verify() (bool, error) {
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
sig := common.FromHex(a.Signature)
|
||||
|
||||
pk, err := crypto.SigToPub(hash, sig)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
|
||||
}
|
||||
|
||||
// PublicKey return public key from signature
|
||||
func (a *AuthMsg) PublicKey() (string, error) {
|
||||
hash, err := a.Identity.Hash()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
sig := common.FromHex(a.Signature)
|
||||
// recover public key
|
||||
pk, err := crypto.SigToPub(hash, sig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
|
||||
}
|
||||
|
||||
// Hash returns the hash of the auth message, which should be the message used
|
||||
// to construct the Signature.
|
||||
func (i *Identity) Hash() ([]byte, error) {
|
||||
byt, err := rlp.EncodeToBytes(i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hash := crypto.Keccak256Hash(byt)
|
||||
return hash[:], nil
|
||||
}
|
||||
|
||||
// ProofMsg is the data structure sent to the coordinator.
|
||||
type ProofMsg struct {
|
||||
*ProofDetail `json:"zkProof"`
|
||||
@@ -259,6 +180,7 @@ type ChunkInfo struct {
|
||||
WithdrawRoot common.Hash `json:"withdraw_root"`
|
||||
DataHash common.Hash `json:"data_hash"`
|
||||
IsPadding bool `json:"is_padding"`
|
||||
TxBytes []byte `json:"tx_bytes"`
|
||||
}
|
||||
|
||||
// ChunkProof includes the proof info that are required for chunk verification and rollup.
|
||||
|
||||
@@ -54,7 +54,7 @@ func TestIdentityHash(t *testing.T) {
|
||||
hash, err := identity.Hash()
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721"
|
||||
expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2"
|
||||
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
|
||||
}
|
||||
|
||||
|
||||
@@ -3,11 +3,16 @@ package utils
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/modern-go/reflect2"
|
||||
"github.com/scroll-tech/go-ethereum/core"
|
||||
)
|
||||
|
||||
// TryTimes try run several times until the function return true.
|
||||
@@ -59,3 +64,17 @@ func RandomURL() string {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(5000-1))
|
||||
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
|
||||
}
|
||||
|
||||
// ReadGenesis parses and returns the genesis file at the given path
|
||||
func ReadGenesis(genesisPath string) (*core.Genesis, error) {
|
||||
file, err := os.Open(filepath.Clean(genesisPath))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
genesis := new(core.Genesis)
|
||||
if err := json.NewDecoder(file).Decode(genesis); err != nil {
|
||||
return nil, errors.Join(err, file.Close())
|
||||
}
|
||||
return genesis, file.Close()
|
||||
}
|
||||
|
||||
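`ReadGenesis` is the helper the coordinator now uses to load the chain config passed via its new `--genesis` flag. A small usage sketch (the file path is a placeholder):

```go
package main

import (
	"fmt"

	"scroll-tech/common/utils"
)

func main() {
	// Parse a genesis file and inspect its chain configuration.
	genesis, err := utils.ReadGenesis("./conf/genesis.json")
	if err != nil {
		panic(err)
	}
	fmt.Println("chain id:", genesis.Config.ChainID)
}
```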
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "v4.3.74"
|
||||
var tag = "v4.3.95"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"license": "MIT",
|
||||
"scripts": {
|
||||
"test:hardhat": "npx hardhat test",
|
||||
"test:forge": "forge test -vvv",
|
||||
"test:forge": "forge test -vvv --evm-version cancun",
|
||||
"test": "yarn test:hardhat && yarn test:forge",
|
||||
"solhint": "./node_modules/.bin/solhint -f table 'src/**/*.sol'",
|
||||
"lint:sol": "./node_modules/.bin/prettier --write 'src/**/*.sol'",
|
||||
|
||||
@@ -92,12 +92,10 @@ contract DeployL1BridgeContracts is Script {
|
||||
}
|
||||
|
||||
function deployMultipleVersionRollupVerifier() internal {
|
||||
uint256[] memory _versions = new uint256[](2);
|
||||
address[] memory _verifiers = new address[](2);
|
||||
uint256[] memory _versions = new uint256[](1);
|
||||
address[] memory _verifiers = new address[](1);
|
||||
_versions[0] = 0;
|
||||
_verifiers[0] = address(zkEvmVerifierV1);
|
||||
_versions[1] = 1;
|
||||
_verifiers[1] = address(zkEvmVerifierV1);
|
||||
rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);
|
||||
|
||||
logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
pragma solidity ^0.8.24;
|
||||
|
||||
/// @title IScrollChain
|
||||
/// @notice The interface for ScrollChain.
|
||||
interface IScrollChain {
|
||||
/**********
|
||||
* Events *
|
||||
@@ -43,23 +45,23 @@ interface IScrollChain {
|
||||
* Public View Functions *
|
||||
*************************/
|
||||
|
||||
/// @notice The latest finalized batch index.
|
||||
/// @return The latest finalized batch index.
|
||||
function lastFinalizedBatchIndex() external view returns (uint256);
|
||||
|
||||
/// @notice Return the batch hash of a committed batch.
|
||||
/// @param batchIndex The index of the batch.
|
||||
/// @return The batch hash of a committed batch.
|
||||
function committedBatches(uint256 batchIndex) external view returns (bytes32);
|
||||
|
||||
/// @notice Return the state root of a committed batch.
|
||||
/// @param batchIndex The index of the batch.
|
||||
/// @return The state root of a committed batch.
|
||||
function finalizedStateRoots(uint256 batchIndex) external view returns (bytes32);
|
||||
|
||||
/// @notice Return the message root of a committed batch.
|
||||
/// @param batchIndex The index of the batch.
|
||||
/// @return The message root of a committed batch.
|
||||
function withdrawRoots(uint256 batchIndex) external view returns (bytes32);
|
||||
|
||||
/// @notice Return whether the batch is finalized by batch index.
|
||||
/// @param batchIndex The index of the batch.
|
||||
/// @return Whether the batch is finalized by batch index.
|
||||
function isBatchFinalized(uint256 batchIndex) external view returns (bool);
|
||||
|
||||
/*****************************
|
||||
@@ -99,18 +101,6 @@ interface IScrollChain {
|
||||
bytes calldata aggrProof
|
||||
) external;
|
||||
|
||||
/// @notice Finalize a committed batch on layer 1 without providing proof.
|
||||
/// @param batchHeader The header of current batch, see the encoding in comments of `commitBatch.
|
||||
/// @param prevStateRoot The state root of parent batch.
|
||||
/// @param postStateRoot The state root of current batch.
|
||||
/// @param withdrawRoot The withdraw trie root of current batch.
|
||||
function finalizeBatch(
|
||||
bytes calldata batchHeader,
|
||||
bytes32 prevStateRoot,
|
||||
bytes32 postStateRoot,
|
||||
bytes32 withdrawRoot
|
||||
) external;
|
||||
|
||||
/// @notice Finalize a committed batch (with blob) on layer 1.
|
||||
///
|
||||
/// @dev Memory layout of `blobDataProof`:
|
||||
@@ -132,18 +122,4 @@ interface IScrollChain {
|
||||
bytes calldata blobDataProof,
|
||||
bytes calldata aggrProof
|
||||
) external;
|
||||
|
||||
/// @notice Finalize a committed batch (with blob) on layer 1 without providing proof.
|
||||
/// @param batchHeader The header of current batch, see the encoding in comments of `commitBatch`.
|
||||
/// @param prevStateRoot The state root of parent batch.
|
||||
/// @param postStateRoot The state root of current batch.
|
||||
/// @param withdrawRoot The withdraw trie root of current batch.
|
||||
/// @param blobDataProof The proof for blob data.
|
||||
function finalizeBatch4844(
|
||||
bytes calldata batchHeader,
|
||||
bytes32 prevStateRoot,
|
||||
bytes32 postStateRoot,
|
||||
bytes32 withdrawRoot,
|
||||
bytes calldata blobDataProof
|
||||
) external;
|
||||
}
|
||||
|
||||
@@ -8,6 +8,8 @@ import {IScrollChain} from "./IScrollChain.sol";
|
||||
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
|
||||
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";
|
||||
|
||||
/// @title MultipleVersionRollupVerifier
|
||||
/// @notice Verifies aggregate zk proofs using the appropriate verifier.
|
||||
contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
|
||||
/**********
|
||||
* Events *
|
||||
@@ -37,7 +39,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
|
||||
*************/
|
||||
|
||||
/// @notice The address of ScrollChain contract.
|
||||
address immutable scrollChain;
|
||||
address public immutable scrollChain;
|
||||
|
||||
/***********
|
||||
* Structs *
|
||||
@@ -58,7 +60,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
|
||||
/// The verifiers are sorted by batchIndex in increasing order.
|
||||
mapping(uint256 => Verifier[]) public legacyVerifiers;
|
||||
|
||||
/// @notice Mapping from verifier version to the lastest used zkevm verifier.
|
||||
/// @notice Mapping from verifier version to the latest used zkevm verifier.
|
||||
mapping(uint256 => Verifier) public latestVerifier;
|
||||
|
||||
/***************
|
||||
@@ -86,6 +88,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
|
||||
*************************/
|
||||
|
||||
/// @notice Return the number of legacy verifiers.
|
||||
/// @param _version The version of legacy verifiers.
|
||||
/// @return The number of legacy verifiers.
|
||||
function legacyVerifiersLength(uint256 _version) external view returns (uint256) {
|
||||
return legacyVerifiers[_version].length;
|
||||
}
|
||||
@@ -93,6 +97,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
|
||||
/// @notice Compute the verifier should be used for specific batch.
|
||||
/// @param _version The version of verifier to query.
|
||||
/// @param _batchIndex The batch index to query.
|
||||
/// @return The address of verifier.
|
||||
function getVerifier(uint256 _version, uint256 _batchIndex) public view returns (address) {
|
||||
// Normally, we will use the latest verifier.
|
||||
Verifier memory _verifier = latestVerifier[_version];
|
||||
@@ -144,6 +149,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
|
||||
************************/
|
||||
|
||||
/// @notice Update the address of zkevm verifier.
|
||||
/// @param _version The version of the verifier.
|
||||
/// @param _startBatchIndex The start batch index when the verifier will be used.
|
||||
/// @param _verifier The address of new verifier.
|
||||
function updateVerifier(
|
||||
|
||||
@@ -115,11 +115,11 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
|
||||
*************/
|
||||
|
||||
/// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
|
||||
address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
|
||||
address private constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
|
||||
|
||||
/// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
|
||||
/// point evaluation precompile
|
||||
uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
|
||||
uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
|
||||
|
||||
/// @notice The chain id of the corresponding layer 2 chain.
|
||||
uint64 public immutable layer2ChainId;
|
||||
@@ -236,6 +236,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
|
||||
*****************************/
|
||||
|
||||
/// @notice Import layer 2 genesis block
|
||||
/// @param _batchHeader The header of the genesis batch.
|
||||
/// @param _stateRoot The state root of the genesis block.
|
||||
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
|
||||
// check genesis batch header length
|
||||
if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();
|
||||
@@ -426,45 +428,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
|
||||
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
|
||||
}
|
||||
|
||||
/// @inheritdoc IScrollChain
|
||||
function finalizeBatch(
|
||||
bytes calldata _batchHeader,
|
||||
bytes32 _prevStateRoot,
|
||||
bytes32 _postStateRoot,
|
||||
bytes32 _withdrawRoot
|
||||
) external override OnlyProver whenNotPaused {
|
||||
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
|
||||
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
|
||||
|
||||
// compute batch hash and verify
|
||||
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
|
||||
|
||||
// verify previous state root.
|
||||
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
|
||||
|
||||
// avoid duplicated verification
|
||||
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
|
||||
|
||||
// check and update lastFinalizedBatchIndex
|
||||
unchecked {
|
||||
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
|
||||
lastFinalizedBatchIndex = _batchIndex;
|
||||
}
|
||||
|
||||
// record state root and withdraw root
|
||||
finalizedStateRoots[_batchIndex] = _postStateRoot;
|
||||
withdrawRoots[_batchIndex] = _withdrawRoot;
|
||||
|
||||
// Pop finalized and non-skipped message from L1MessageQueue.
|
||||
_popL1Messages(
|
||||
BatchHeaderV0Codec.getSkippedBitmapPtr(memPtr),
|
||||
BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr),
|
||||
BatchHeaderV0Codec.getL1MessagePopped(memPtr)
|
||||
);
|
||||
|
||||
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
|
||||
}
|
||||
|
||||
/// @inheritdoc IScrollChain
|
||||
/// @dev Memory layout of `_blobDataProof`:
|
||||
/// ```text
|
||||
@@ -514,7 +477,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
|
||||
_postStateRoot,
|
||||
_withdrawRoot,
|
||||
_dataHash,
|
||||
_blobDataProof[0:64]
|
||||
_blobDataProof[0:64],
|
||||
_blobVersionedHash
|
||||
)
|
||||
);
|
||||
|
||||
@@ -546,59 +510,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
|
||||
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
|
||||
}
|
||||
|
||||
/// @inheritdoc IScrollChain
|
||||
function finalizeBatch4844(
|
||||
bytes calldata _batchHeader,
|
||||
bytes32 _prevStateRoot,
|
||||
bytes32 _postStateRoot,
|
||||
bytes32 _withdrawRoot,
|
||||
bytes calldata _blobDataProof
|
||||
) external override OnlyProver whenNotPaused {
|
||||
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
|
||||
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
|
||||
|
||||
// compute batch hash and verify
|
||||
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
|
||||
bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(memPtr);
|
||||
|
||||
// Calls the point evaluation precompile and verifies the output
|
||||
{
|
||||
(bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(
|
||||
abi.encodePacked(_blobVersionedHash, _blobDataProof)
|
||||
);
|
||||
// We verify that the point evaluation precompile call was successful by testing the latter 32 bytes of the
|
||||
// response is equal to BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
|
||||
if (!success) revert ErrorCallPointEvaluationPrecompileFailed();
|
||||
(, uint256 result) = abi.decode(data, (uint256, uint256));
|
||||
if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
|
||||
}
|
||||
|
||||
// verify previous state root.
|
||||
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
|
||||
|
||||
// avoid duplicated verification
|
||||
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
|
||||
|
||||
// check and update lastFinalizedBatchIndex
|
||||
unchecked {
|
||||
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
|
||||
lastFinalizedBatchIndex = _batchIndex;
|
||||
}
|
||||
|
||||
// record state root and withdraw root
|
||||
finalizedStateRoots[_batchIndex] = _postStateRoot;
|
||||
withdrawRoots[_batchIndex] = _withdrawRoot;
|
||||
|
||||
// Pop finalized and non-skipped message from L1MessageQueue.
|
||||
_popL1Messages(
|
||||
BatchHeaderV1Codec.getSkippedBitmapPtr(memPtr),
|
||||
BatchHeaderV1Codec.getTotalL1MessagePopped(memPtr),
|
||||
BatchHeaderV1Codec.getL1MessagePopped(memPtr)
|
||||
);
|
||||
|
||||
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
|
||||
}
|
||||
|
||||
/************************
|
||||
* Restricted Functions *
|
||||
************************/
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
|
||||
pragma solidity ^0.8.24;
|
||||
|
||||
/// @title IRollupVerifier
|
||||
/// @notice The interface for rollup verifier.
|
||||
interface IRollupVerifier {
|
||||
/// @notice Verify aggregate zk proof.
|
||||
/// @param batchIndex The batch index to verify.
|
||||
|
||||
@@ -199,7 +199,7 @@ library PatriciaMerkleTrieVerifier {
|
||||
}
|
||||
|
||||
// decodes all RLP encoded data and stores their DATA items
|
||||
// [length - 128 bits | calldata offset - 128 bits] in a continous memory region.
|
||||
// [length - 128 bits | calldata offset - 128 bits] in a continuous memory region.
|
||||
// Expects that the RLP starts with a list that defines the length
|
||||
// of the whole RLP region.
|
||||
function decodeFlat(_ptr) -> ptr, memStart, nItems, hash {
|
||||
@@ -505,7 +505,7 @@ library PatriciaMerkleTrieVerifier {
|
||||
}
|
||||
|
||||
// the one and only boundary check
|
||||
// in case an attacker crafted a malicous payload
|
||||
// in case an attacker crafted a malicious payload
|
||||
// and succeeds in the prior verification steps
|
||||
// then this should catch any bogus accesses
|
||||
if iszero(eq(ptr, add(proof.offset, proof.length))) {
|
||||
|
||||
@@ -83,6 +83,8 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
|
||||
}
|
||||
|
||||
function testTransferUSDCRoles(address owner) external {
|
||||
hevm.assume(owner != address(0));
|
||||
|
||||
// non-whitelisted caller call, should revert
|
||||
hevm.expectRevert("only circle caller");
|
||||
gateway.transferUSDCRoles(owner);
|
||||
|
||||
@@ -38,7 +38,7 @@ make lint
|
||||
|
||||
## Configure
|
||||
|
||||
The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManager` in [`config/config.go`](config/config.go) for more details.
|
||||
The coordinator behavior can be configured using [`conf/config.json`](conf/config.json). Check the code comments under `ProverManager` in [`internal/config/config.go`](internal/config/config.go) for more details.
|
||||
|
||||
|
||||
## Start
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/urfave/cli/v2"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -49,7 +50,6 @@ func action(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
db, err := database.InitDB(cfg.DB)
|
||||
if err != nil {
|
||||
log.Crit("failed to init db connection", "err", err)
|
||||
@@ -60,10 +60,16 @@ func action(ctx *cli.Context) error {
|
||||
}
|
||||
}()
|
||||
|
||||
genesisPath := ctx.String(utils.Genesis.Name)
|
||||
genesis, err := utils.ReadGenesis(genesisPath)
|
||||
if err != nil {
|
||||
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
|
||||
}
|
||||
|
||||
registry := prometheus.DefaultRegisterer
|
||||
observability.Server(ctx, db)
|
||||
|
||||
apiSrv := apiServer(ctx, cfg, db, registry)
|
||||
apiSrv := apiServer(ctx, cfg, genesis.Config, db, registry)
|
||||
|
||||
log.Info(
|
||||
"Start coordinator api successfully.",
|
||||
@@ -90,9 +96,9 @@ func action(ctx *cli.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func apiServer(ctx *cli.Context, cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) *http.Server {
|
||||
func apiServer(ctx *cli.Context, cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *http.Server {
|
||||
router := gin.New()
|
||||
api.InitController(cfg, db, reg)
|
||||
api.InitController(cfg, chainCfg, db, reg)
|
||||
route.Route(router, cfg, reg)
|
||||
port := ctx.String(httpPortFlag.Name)
|
||||
srv := &http.Server{
|
||||
|
||||
@@ -10,11 +10,13 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
coordinatorConfig "scroll-tech/coordinator/internal/config"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
coordinatorConfig "scroll-tech/coordinator/internal/config"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -23,29 +25,35 @@ var (
|
||||
|
||||
// CoordinatorApp coordinator-test client manager.
|
||||
type CoordinatorApp struct {
|
||||
Config *coordinatorConfig.Config
|
||||
Config *coordinatorConfig.Config
|
||||
ChainConfig *params.ChainConfig
|
||||
|
||||
base *docker.App
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
|
||||
originFile string
|
||||
coordinatorFile string
|
||||
HTTPPort int64
|
||||
configOriginFile string
|
||||
chainConfigOriginFile string
|
||||
coordinatorFile string
|
||||
genesisFile string
|
||||
HTTPPort int64
|
||||
|
||||
args []string
|
||||
docker.AppAPI
|
||||
*cmd.Cmd
|
||||
}
|
||||
|
||||
// NewCoordinatorApp return a new coordinatorApp manager.
|
||||
func NewCoordinatorApp(base *docker.App, file string) *CoordinatorApp {
|
||||
coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", base.Timestamp)
|
||||
func NewCoordinatorApp(testApps *testcontainers.TestcontainerApps, configFile string, chainConfigFile string) *CoordinatorApp {
|
||||
coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", testApps.Timestamp)
|
||||
genesisFile := fmt.Sprintf("/tmp/%d_genesis.json", testApps.Timestamp)
|
||||
port, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
httpPort := port.Int64() + httpStartPort
|
||||
coordinatorApp := &CoordinatorApp{
|
||||
base: base,
|
||||
originFile: file,
|
||||
coordinatorFile: coordinatorFile,
|
||||
HTTPPort: httpPort,
|
||||
args: []string{"--log.debug", "--config", coordinatorFile, "--http", "--http.port", strconv.Itoa(int(httpPort))},
|
||||
testApps: testApps,
|
||||
configOriginFile: configFile,
|
||||
chainConfigOriginFile: chainConfigFile,
|
||||
coordinatorFile: coordinatorFile,
|
||||
genesisFile: genesisFile,
|
||||
HTTPPort: httpPort,
|
||||
args: []string{"--log.debug", "--config", coordinatorFile, "--genesis", genesisFile, "--http", "--http.port", strconv.Itoa(int(httpPort))},
|
||||
}
|
||||
if err := coordinatorApp.MockConfig(true); err != nil {
|
||||
panic(err)
|
||||
@@ -55,14 +63,14 @@ func NewCoordinatorApp(base *docker.App, file string) *CoordinatorApp {
|
||||
|
||||
// RunApp run coordinator-test child process by multi parameters.
|
||||
func (c *CoordinatorApp) RunApp(t *testing.T, args ...string) {
|
||||
c.AppAPI = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
|
||||
c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
|
||||
c.Cmd = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
|
||||
c.Cmd.RunApp(func() bool { return c.Cmd.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
|
||||
}
|
||||
|
||||
// Free stop and release coordinator-test.
|
||||
func (c *CoordinatorApp) Free() {
|
||||
if !utils.IsNil(c.AppAPI) {
|
||||
c.AppAPI.WaitExit()
|
||||
if !utils.IsNil(c.Cmd) {
|
||||
c.Cmd.WaitExit()
|
||||
}
|
||||
_ = os.Remove(c.coordinatorFile)
|
||||
}
|
||||
@@ -74,8 +82,7 @@ func (c *CoordinatorApp) HTTPEndpoint() string {
|
||||
|
||||
// MockConfig creates a new coordinator config.
|
||||
func (c *CoordinatorApp) MockConfig(store bool) error {
|
||||
base := c.base
|
||||
cfg, err := coordinatorConfig.NewConfig(c.originFile)
|
||||
cfg, err := coordinatorConfig.NewConfig(c.configOriginFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -89,20 +96,40 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
|
||||
MaxVerifierWorkers: 4,
|
||||
MinProverVersion: "v1.0.0",
|
||||
}
|
||||
cfg.DB.DSN = base.DBImg.Endpoint()
|
||||
endpoint, err := c.testApps.GetDBEndPoint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg.DB.DSN = endpoint
|
||||
cfg.L2.ChainID = 111
|
||||
cfg.Auth.ChallengeExpireDurationSec = 1
|
||||
cfg.Auth.LoginExpireDurationSec = 1
|
||||
c.Config = cfg
|
||||
|
||||
genesis, err := utils.ReadGenesis(c.chainConfigOriginFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.ChainConfig = genesis.Config
|
||||
|
||||
if !store {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, err := json.Marshal(c.Config)
|
||||
coordinatorConfigData, err := json.Marshal(c.Config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
genesisConfigData, err := json.Marshal(genesis)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(c.coordinatorFile, data, 0600)
|
||||
if writeErr := os.WriteFile(c.coordinatorFile, coordinatorConfigData, 0600); writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
if writeErr := os.WriteFile(c.genesisFile, genesisConfigData, 0600); writeErr != nil {
|
||||
return writeErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
"batch_collection_time_sec": 180,
|
||||
"chunk_collection_time_sec": 180,
|
||||
"verifier": {
|
||||
"fork_name": "bernoulli",
|
||||
"mock_mode": true,
|
||||
"params_path": "",
|
||||
"assets_path": ""
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
module scroll-tech/coordinator
|
||||
|
||||
go 1.20
|
||||
go 1.21
|
||||
|
||||
require (
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/go-resty/resty/v2 v2.7.0
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
|
||||
github.com/shopspring/decimal v1.3.1
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
|
||||
@@ -59,6 +59,7 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
|
||||
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
|
||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
@@ -95,6 +96,7 @@ github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
@@ -117,11 +119,13 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
@@ -144,6 +148,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
@@ -153,6 +158,7 @@ github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6
|
||||
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
|
||||
@@ -164,6 +170,7 @@ github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr
|
||||
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
@@ -173,8 +180,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935 h1:bHBt6sillaT4o/9RjxkVX8pWwvEmu37uWBw4XbCjfzY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
|
||||
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
|
||||
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
@@ -197,6 +204,7 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8
|
||||
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0=
|
||||
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
|
||||
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
|
||||
@@ -50,6 +50,7 @@ type Config struct {
|
||||
|
||||
// VerifierConfig load zk verifier config.
|
||||
type VerifierConfig struct {
|
||||
ForkName string `json:"fork_name"`
|
||||
MockMode bool `json:"mock_mode"`
|
||||
ParamsPath string `json:"params_path"`
|
||||
AssetsPath string `json:"assets_path"`
|
||||
|
||||
@@ -53,25 +53,44 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
|
||||
return jwt.MapClaims{}
|
||||
}
|
||||
|
||||
// recover the public key
|
||||
authMsg := message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Challenge: v.Message.Challenge,
|
||||
ProverName: v.Message.ProverName,
|
||||
ProverVersion: v.Message.ProverVersion,
|
||||
},
|
||||
Signature: v.Signature,
|
||||
var publicKey string
|
||||
var err error
|
||||
if v.Message.HardForkName != "" {
|
||||
authMsg := message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Challenge: v.Message.Challenge,
|
||||
ProverName: v.Message.ProverName,
|
||||
ProverVersion: v.Message.ProverVersion,
|
||||
HardForkName: v.Message.HardForkName,
|
||||
},
|
||||
Signature: v.Signature,
|
||||
}
|
||||
publicKey, err = authMsg.PublicKey()
|
||||
} else {
|
||||
authMsg := message.LegacyAuthMsg{
|
||||
Identity: &message.LegacyIdentity{
|
||||
Challenge: v.Message.Challenge,
|
||||
ProverName: v.Message.ProverName,
|
||||
ProverVersion: v.Message.ProverVersion,
|
||||
},
|
||||
Signature: v.Signature,
|
||||
}
|
||||
publicKey, err = authMsg.PublicKey()
|
||||
}
|
||||
|
||||
publicKey, err := authMsg.PublicKey()
|
||||
if err != nil {
|
||||
return jwt.MapClaims{}
|
||||
}
|
||||
|
||||
if v.Message.HardForkName == "" {
|
||||
v.Message.HardForkName = "shanghai"
|
||||
}
|
||||
|
||||
return jwt.MapClaims{
|
||||
types.PublicKey: publicKey,
|
||||
types.ProverName: v.Message.ProverName,
|
||||
types.ProverVersion: v.Message.ProverVersion,
|
||||
types.HardForkName: v.Message.HardForkName,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -89,5 +108,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
|
||||
if proverVersion, ok := claims[types.ProverVersion]; ok {
|
||||
c.Set(types.ProverVersion, proverVersion)
|
||||
}
|
||||
|
||||
if hardForkName, ok := claims[types.HardForkName]; ok {
|
||||
c.Set(types.HardForkName, hardForkName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
@@ -17,20 +17,18 @@ var (
|
||||
SubmitProof *SubmitProofController
|
||||
// Auth the auth controller
|
||||
Auth *AuthController
|
||||
|
||||
initControllerOnce sync.Once
|
||||
)
|
||||
|
||||
// InitController inits Controller with database
|
||||
func InitController(cfg *config.Config, db *gorm.DB, reg prometheus.Registerer) {
|
||||
initControllerOnce.Do(func() {
|
||||
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
|
||||
if err != nil {
|
||||
panic("proof receiver new verifier failure")
|
||||
}
|
||||
func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) {
|
||||
vf, err := verifier.NewVerifier(cfg.ProverManager.Verifier)
|
||||
if err != nil {
|
||||
panic("proof receiver new verifier failure")
|
||||
}
|
||||
|
||||
Auth = NewAuthController(db)
|
||||
GetTask = NewGetTaskController(cfg, db, vf, reg)
|
||||
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
|
||||
})
|
||||
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)
|
||||
|
||||
Auth = NewAuthController(db)
|
||||
GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
|
||||
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
|
||||
}
|
||||
|
||||
@@ -6,6 +6,9 @@ import (
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
@@ -20,15 +23,21 @@ import (
|
||||
// GetTaskController the get prover task api controller
|
||||
type GetTaskController struct {
|
||||
proverTasks map[message.ProofType]provertask.ProverTask
|
||||
|
||||
getTaskAccessCounter *prometheus.CounterVec
|
||||
}
|
||||
|
||||
// NewGetTaskController create a get prover task controller
|
||||
func NewGetTaskController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
|
||||
chunkProverTask := provertask.NewChunkProverTask(cfg, db, vf.ChunkVK, reg)
|
||||
batchProverTask := provertask.NewBatchProverTask(cfg, db, vf.BatchVK, reg)
|
||||
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
|
||||
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg)
|
||||
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg)
|
||||
|
||||
ptc := &GetTaskController{
|
||||
proverTasks: make(map[message.ProofType]provertask.ProverTask),
|
||||
getTaskAccessCounter: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "coordinator_get_task_access_count",
|
||||
Help: "Multi dimensions get task counter.",
|
||||
}, []string{coordinatorType.LabelProverName, coordinatorType.LabelProverPublicKey, coordinatorType.LabelProverVersion}),
|
||||
}
|
||||
|
||||
ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
|
||||
@@ -37,6 +46,28 @@ func NewGetTaskController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier
|
||||
return ptc
|
||||
}
|
||||
|
||||
func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
|
||||
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
|
||||
if !publicKeyExist {
|
||||
return fmt.Errorf("get public key from context failed")
|
||||
}
|
||||
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
|
||||
if !proverNameExist {
|
||||
return fmt.Errorf("get prover name from context failed")
|
||||
}
|
||||
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
|
||||
if !proverVersionExist {
|
||||
return fmt.Errorf("get prover version from context failed")
|
||||
}
|
||||
|
||||
ptc.getTaskAccessCounter.With(prometheus.Labels{
|
||||
coordinatorType.LabelProverPublicKey: publicKey.(string),
|
||||
coordinatorType.LabelProverName: proverName.(string),
|
||||
coordinatorType.LabelProverVersion: proverVersion.(string),
|
||||
}).Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTasks get assigned chunk/batch task
|
||||
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
|
||||
var getTaskParameter coordinatorType.GetTaskParameter
|
||||
@@ -54,6 +85,10 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := ptc.incGetTaskAccessCounter(ctx); err != nil {
|
||||
log.Warn("get_task access counter inc failed", "error", err.Error())
|
||||
}
|
||||
|
||||
result, err := proverTask.Assign(ctx, &getTaskParameter)
|
||||
if err != nil {
|
||||
nerr := fmt.Errorf("return prover task err:%w", err)
|
||||
|
||||
@@ -30,8 +30,8 @@ func (c *Collector) cleanupChallenge() {
|
||||
log.Error("manager context canceled with error", "error", c.ctx.Err())
|
||||
}
|
||||
return
|
||||
case <-c.stopTimeoutChan:
|
||||
log.Info("the coordinator run loop exit")
|
||||
case <-c.stopCleanChallengeChan:
|
||||
log.Info("the coordinator cleanupChallenge run loop exit")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,7 +23,10 @@ type Collector struct {
|
||||
db *gorm.DB
|
||||
ctx context.Context
|
||||
|
||||
stopTimeoutChan chan struct{}
|
||||
stopChunkTimeoutChan chan struct{}
|
||||
stopBatchTimeoutChan chan struct{}
|
||||
stopBatchAllChunkReadyChan chan struct{}
|
||||
stopCleanChallengeChan chan struct{}
|
||||
|
||||
proverTaskOrm *orm.ProverTask
|
||||
chunkOrm *orm.Chunk
|
||||
@@ -40,14 +43,17 @@ type Collector struct {
|
||||
// NewCollector create a collector to cron collect the data to send to prover
|
||||
func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prometheus.Registerer) *Collector {
|
||||
c := &Collector{
|
||||
cfg: cfg,
|
||||
db: db,
|
||||
ctx: ctx,
|
||||
stopTimeoutChan: make(chan struct{}),
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
batchOrm: orm.NewBatch(db),
|
||||
challenge: orm.NewChallenge(db),
|
||||
cfg: cfg,
|
||||
db: db,
|
||||
ctx: ctx,
|
||||
stopChunkTimeoutChan: make(chan struct{}),
|
||||
stopBatchTimeoutChan: make(chan struct{}),
|
||||
stopBatchAllChunkReadyChan: make(chan struct{}),
|
||||
stopCleanChallengeChan: make(chan struct{}),
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
batchOrm: orm.NewBatch(db),
|
||||
challenge: orm.NewChallenge(db),
|
||||
|
||||
timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "coordinator_batch_timeout_checker_run_total",
|
||||
@@ -83,7 +89,10 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
|
||||
|
||||
// Stop all the collector
|
||||
func (c *Collector) Stop() {
|
||||
c.stopTimeoutChan <- struct{}{}
|
||||
c.stopChunkTimeoutChan <- struct{}{}
|
||||
c.stopBatchTimeoutChan <- struct{}{}
|
||||
c.stopBatchAllChunkReadyChan <- struct{}{}
|
||||
c.stopCleanChallengeChan <- struct{}{}
|
||||
}
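The hunk above replaces the single shared stopTimeoutChan with one stop channel per background loop (chunk timeout, batch timeout, batch-all-chunk-ready, challenge cleanup), and Stop() now signals each of them. With one unbuffered channel shared by several receiving goroutines, a single send would wake only one loop. A minimal sketch of the pattern in isolation (editor's illustration, not code from this repository):

// Editor's sketch (not from this repository): one stop channel per loop.
// With a single unbuffered channel shared by several receivers, one send
// would wake only one goroutine; a dedicated channel per loop lets Stop()
// reach every loop, mirroring Collector.Stop above.
package main

import "fmt"

func runLoop(name string, stop <-chan struct{}, done chan<- string) {
	<-stop // block until this particular loop is told to stop
	done <- name
}

func main() {
	stopChunk := make(chan struct{})
	stopBatch := make(chan struct{})
	done := make(chan string, 2)

	go runLoop("chunkTimeout", stopChunk, done)
	go runLoop("batchTimeout", stopBatch, done)

	// Stop(): send to each loop's own channel.
	stopChunk <- struct{}{}
	stopBatch <- struct{}{}
	fmt.Println(<-done, <-done)
}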
|
||||
|
||||
// timeoutTask cron check the send task is timeout. if timeout reached, restore the
|
||||
@@ -113,8 +122,8 @@ func (c *Collector) timeoutBatchProofTask() {
|
||||
log.Error("manager context canceled with error", "error", c.ctx.Err())
|
||||
}
|
||||
return
|
||||
case <-c.stopTimeoutChan:
|
||||
log.Info("the coordinator run loop exit")
|
||||
case <-c.stopBatchTimeoutChan:
|
||||
log.Info("the coordinator timeoutBatchProofTask run loop exit")
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -146,8 +155,8 @@ func (c *Collector) timeoutChunkProofTask() {
|
||||
log.Error("manager context canceled with error", "error", c.ctx.Err())
|
||||
}
|
||||
return
|
||||
case <-c.stopTimeoutChan:
|
||||
log.Info("the coordinator run loop exit")
|
||||
case <-c.stopChunkTimeoutChan:
|
||||
log.Info("the coordinator timeoutChunkProofTask run loop exit")
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -253,8 +262,8 @@ func (c *Collector) checkBatchAllChunkReady() {
|
||||
log.Error("manager context canceled with error", "error", c.ctx.Err())
|
||||
}
|
||||
return
|
||||
case <-c.stopTimeoutChan:
|
||||
log.Info("the coordinator run loop exit")
|
||||
case <-c.stopBatchAllChunkReadyChan:
|
||||
log.Info("the coordinator checkBatchAllChunkReady run loop exit")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,5 +21,5 @@ func NewLoginLogic(db *gorm.DB) *LoginLogic {
|
||||
|
||||
// InsertChallengeString inserts the challenge string and checks whether it already exists
|
||||
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
|
||||
return l.challengeOrm.InsertChallenge(ctx, challenge)
|
||||
return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
|
||||
}
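This change, like many call sites later in the diff, passes ctx.Copy() instead of the request's *gin.Context. gin's Context.Copy() returns a snapshot that stays valid once the context may escape or outlive the handler; the diff does not spell out the motivation here, so treat that as the usual reading rather than a confirmed one. A minimal sketch of the pattern (editor's illustration with a placeholder route):

// Editor's sketch of the usual gin Context.Copy() pattern; the route and
// handler here are placeholders, not part of this repository.
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()
	r.GET("/ping", func(c *gin.Context) {
		cp := c.Copy() // snapshot that stays valid outside the handler's lifetime
		go func() {
			time.Sleep(time.Second)
			log.Println("finished background work for", cp.Request.URL.Path) // use cp, never c
		}()
		c.JSON(http.StatusOK, gin.H{"ok": true})
	})
	_ = r.Run(":8080")
}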
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
@@ -11,8 +12,10 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/forks"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
@@ -27,16 +30,22 @@ type BatchProverTask struct {
|
||||
BaseProverTask
|
||||
|
||||
batchAttemptsExceedTotal prometheus.Counter
|
||||
batchTaskGetTaskTotal prometheus.Counter
|
||||
batchTaskGetTaskTotal *prometheus.CounterVec
|
||||
batchTaskGetTaskProver *prometheus.CounterVec
|
||||
}
|
||||
|
||||
// NewBatchProverTask new a batch collector
|
||||
func NewBatchProverTask(cfg *config.Config, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
|
||||
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask {
|
||||
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
|
||||
log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
|
||||
|
||||
bp := &BatchProverTask{
|
||||
BaseProverTask: BaseProverTask{
|
||||
vk: vk,
|
||||
vkMap: vkMap,
|
||||
db: db,
|
||||
cfg: cfg,
|
||||
nameForkMap: nameForkMap,
|
||||
forkHeights: forkHeights,
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
batchOrm: orm.NewBatch(db),
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
@@ -46,10 +55,11 @@ func NewBatchProverTask(cfg *config.Config, db *gorm.DB, vk string, reg promethe
|
||||
Name: "coordinator_batch_attempts_exceed_total",
|
||||
Help: "Total number of batch attempts exceed.",
|
||||
}),
|
||||
batchTaskGetTaskTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
batchTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "coordinator_batch_get_task_total",
|
||||
Help: "Total number of batch get task.",
|
||||
}),
|
||||
}, []string{"fork_name"}),
|
||||
batchTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "batch"),
|
||||
}
|
||||
return bp
|
||||
}
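NewBatchProverTask (and NewChunkProverTask later in the diff) now derives fork metadata from the chain config via forks.CollectSortedForkHeights and later slices work with forks.BlockRange. Neither helper's body appears in this diff, so the following is only an editor's sketch of the behavior the call sites appear to rely on: sorted fork activation heights, a fork-name-to-height map, and a half-open [from, to) block range for a given fork. Names and details here are assumptions, not the actual scroll-tech/common/forks implementation.

// Editor's sketch only; the real helpers live in scroll-tech/common/forks and
// are not shown in this diff. Names and behavior are assumptions inferred from
// how the call sites use the results.
package forkssketch

import (
	"math"
	"sort"
)

// collectSortedForkHeights returns the activation heights in ascending order
// together with a fork-name -> activation-height map.
func collectSortedForkHeights(byName map[string]uint64) ([]uint64, map[string]uint64) {
	heights := make([]uint64, 0, len(byName))
	for _, h := range byName {
		heights = append(heights, h)
	}
	sort.Slice(heights, func(i, j int) bool { return heights[i] < heights[j] })
	return heights, byName
}

// blockRange returns the half-open [from, to) block range governed by the fork
// activated at forkHeight: from its own activation up to the next fork's
// activation, or math.MaxInt64 when it is the newest fork.
func blockRange(forkHeight uint64, sortedHeights []uint64) (from, to uint64) {
	from, to = forkHeight, uint64(math.MaxInt64)
	for _, h := range sortedHeights {
		if h > forkHeight {
			to = h
			break
		}
	}
	return from, to
}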
|
||||
@@ -61,13 +71,48 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
|
||||
}
|
||||
|
||||
hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
|
||||
if err != nil {
|
||||
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if the hard fork number is set, the rollup relayer must start generating chunks from that hard fork number,
// so the first chunk of the hard fork must have start_block_number equal to ForkBlockNumber
|
||||
var startChunkIndex uint64 = 0
|
||||
var endChunkIndex uint64 = math.MaxInt64
|
||||
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
|
||||
if fromBlockNum != 0 {
|
||||
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
|
||||
if chunkErr != nil {
|
||||
log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
if startChunk == nil {
|
||||
return nil, nil
|
||||
}
|
||||
startChunkIndex = startChunk.Index
|
||||
}
|
||||
if toBlockNum != math.MaxInt64 {
|
||||
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
|
||||
if chunkErr != nil {
|
||||
log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
if toChunk != nil {
|
||||
// a nil toChunk only indicates that we haven't yet reached the fork boundary,
// so there is no need to change endChunkIndex from math.MaxInt64
|
||||
endChunkIndex = toChunk.Index
|
||||
}
|
||||
}
|
||||
|
||||
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
|
||||
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
|
||||
var batchTask *orm.Batch
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
var tmpBatchTask *orm.Batch
|
||||
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx, maxActiveAttempts, maxTotalAttempts)
|
||||
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
@@ -76,7 +121,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
// Why query again here? To allow a task to be assigned to multiple provers, batches in `ProvingTaskAssigned`
// status must also be handed out. Using `proving_status in (1, 2)` would not use the postgres index, so the query is split.
|
||||
if tmpBatchTask == nil {
|
||||
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx, maxActiveAttempts, maxTotalAttempts)
|
||||
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
@@ -88,7 +133,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx, tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
|
||||
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
@@ -123,20 +168,25 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
}
|
||||
|
||||
// Store session info.
|
||||
if err = bp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
|
||||
if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
|
||||
bp.recoverActiveAttempts(ctx, batchTask)
|
||||
log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
taskMsg, err := bp.formatProverTask(ctx, &proverTask)
|
||||
taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
|
||||
if err != nil {
|
||||
bp.recoverActiveAttempts(ctx, batchTask)
|
||||
log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
bp.batchTaskGetTaskTotal.Inc()
|
||||
bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
|
||||
bp.batchTaskGetTaskProver.With(prometheus.Labels{
|
||||
coordinatorType.LabelProverName: proverTask.ProverName,
|
||||
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
|
||||
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
|
||||
}).Inc()
|
||||
|
||||
return taskMsg, nil
|
||||
}
|
||||
@@ -166,6 +216,9 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
|
||||
DataHash: common.HexToHash(chunk.Hash),
|
||||
IsPadding: false,
|
||||
}
|
||||
if proof.ChunkInfo != nil {
|
||||
chunkInfo.TxBytes = proof.ChunkInfo.TxBytes
|
||||
}
|
||||
chunkInfos = append(chunkInfos, &chunkInfo)
|
||||
}
|
||||
|
||||
@@ -189,7 +242,7 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
|
||||
}
|
||||
|
||||
func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *orm.Batch) {
|
||||
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx, batchTask.Hash); err != nil {
|
||||
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
|
||||
log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,8 +10,10 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/forks"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
@@ -21,24 +23,26 @@ import (
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
// ErrCoordinatorInternalFailure coordinator internal db failure
|
||||
var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
|
||||
|
||||
// ChunkProverTask the chunk prover task
|
||||
type ChunkProverTask struct {
|
||||
BaseProverTask
|
||||
|
||||
chunkAttemptsExceedTotal prometheus.Counter
|
||||
chunkTaskGetTaskTotal prometheus.Counter
|
||||
chunkTaskGetTaskTotal *prometheus.CounterVec
|
||||
chunkTaskGetTaskProver *prometheus.CounterVec
|
||||
}
|
||||
|
||||
// NewChunkProverTask new a chunk prover task
|
||||
func NewChunkProverTask(cfg *config.Config, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
|
||||
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask {
|
||||
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
|
||||
log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
|
||||
cp := &ChunkProverTask{
|
||||
BaseProverTask: BaseProverTask{
|
||||
vk: vk,
|
||||
vkMap: vkMap,
|
||||
db: db,
|
||||
cfg: cfg,
|
||||
nameForkMap: nameForkMap,
|
||||
forkHeights: forkHeights,
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
blockOrm: orm.NewL2Block(db),
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
@@ -48,10 +52,11 @@ func NewChunkProverTask(cfg *config.Config, db *gorm.DB, vk string, reg promethe
|
||||
Name: "coordinator_chunk_attempts_exceed_total",
|
||||
Help: "Total number of chunk attempts exceed.",
|
||||
}),
|
||||
chunkTaskGetTaskTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
chunkTaskGetTaskTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "coordinator_chunk_get_task_total",
|
||||
Help: "Total number of chunk get task.",
|
||||
}),
|
||||
}, []string{"fork_name"}),
|
||||
chunkTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "chunk"),
|
||||
}
|
||||
return cp
|
||||
}
|
||||
@@ -63,13 +68,24 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
|
||||
}
|
||||
|
||||
hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
|
||||
if err != nil {
|
||||
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
|
||||
if toBlockNum > getTaskParameter.ProverHeight {
|
||||
toBlockNum = getTaskParameter.ProverHeight + 1
|
||||
}
|
||||
|
||||
maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
|
||||
maxTotalAttempts := cp.cfg.ProverManager.SessionAttempts
|
||||
var chunkTask *orm.Chunk
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
var tmpChunkTask *orm.Chunk
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx, getTaskParameter.ProverHeight, maxActiveAttempts, maxTotalAttempts)
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
@@ -78,7 +94,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
// Why query again here? To allow a task to be assigned to multiple provers, chunks in `ProvingTaskAssigned`
// status must also be handed out. Using `proving_status in (1, 2)` would not use the postgres index, so the query is split.
|
||||
if tmpChunkTask == nil {
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx, getTaskParameter.ProverHeight, maxActiveAttempts, maxTotalAttempts)
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
@@ -90,7 +106,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx, tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
|
||||
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
@@ -124,20 +140,25 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
AssignedAt: utils.NowUTC(),
|
||||
}
|
||||
|
||||
if err = cp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
|
||||
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
taskMsg, err := cp.formatProverTask(ctx, &proverTask)
|
||||
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
|
||||
if err != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
cp.chunkTaskGetTaskTotal.Inc()
|
||||
cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
|
||||
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
|
||||
coordinatorType.LabelProverName: proverTask.ProverName,
|
||||
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
|
||||
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
|
||||
}).Inc()
|
||||
|
||||
return taskMsg, nil
|
||||
}
|
||||
|
||||
@@ -2,8 +2,12 @@ package provertask
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/version"
|
||||
@@ -13,6 +17,13 @@ import (
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCoordinatorInternalFailure coordinator internal db failure
|
||||
ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
|
||||
// ErrHardForkName indicates client request with the wrong hard fork name
|
||||
ErrHardForkName = fmt.Errorf("wrong hard fork name")
|
||||
)
|
||||
|
||||
// ProverTask is the interface of a collector that sends task data to provers
|
||||
type ProverTask interface {
|
||||
Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
|
||||
@@ -22,7 +33,10 @@ type ProverTask interface {
|
||||
type BaseProverTask struct {
|
||||
cfg *config.Config
|
||||
db *gorm.DB
|
||||
vk string
|
||||
|
||||
vkMap map[string]string
|
||||
nameForkMap map[string]uint64
|
||||
forkHeights []uint64
|
||||
|
||||
batchOrm *orm.Batch
|
||||
chunkOrm *orm.Chunk
|
||||
@@ -35,6 +49,7 @@ type proverTaskContext struct {
|
||||
PublicKey string
|
||||
ProverName string
|
||||
ProverVersion string
|
||||
HardForkName string
|
||||
}
|
||||
|
||||
// checkParameter checks whether the prover task parameters are legal
|
||||
@@ -59,12 +74,24 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
|
||||
}
|
||||
ptc.ProverVersion = proverVersion.(string)
|
||||
|
||||
hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
|
||||
if !hardForkNameExist {
|
||||
return nil, fmt.Errorf("get hard fork name from context failed")
|
||||
}
|
||||
ptc.HardForkName = hardForkName.(string)
|
||||
|
||||
if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
|
||||
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
|
||||
}
|
||||
|
||||
vk, vkExist := b.vkMap[ptc.HardForkName]
|
||||
if !vkExist {
|
||||
return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
|
||||
}
|
||||
|
||||
// if the prover has a different vk
|
||||
if getTaskParameter.VK != b.vk {
|
||||
if getTaskParameter.VK != vk {
|
||||
log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
|
||||
// if the prover reports a different prover version
|
||||
if !version.CheckScrollProverVersion(proverVersion.(string)) {
|
||||
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
|
||||
@@ -73,7 +100,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
|
||||
return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
|
||||
}
|
||||
|
||||
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx, publicKey.(string))
|
||||
isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s, proverVersion: %s", publicKey, err, proverName, proverVersion)
|
||||
}
|
||||
@@ -81,7 +108,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
|
||||
return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
|
||||
}
|
||||
|
||||
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
|
||||
isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to check if prover %s is assigned a task, err: %w", publicKey.(string), err)
|
||||
}
|
||||
@@ -91,3 +118,37 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
|
||||
}
|
||||
return &ptc, nil
|
||||
}
|
||||
|
||||
func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error) {
|
||||
// at the first hard fork upgrade, provers do not yet pass fork_name to the coordinator,
// so the coordinator needs to stay compatible with that.
|
||||
if forkName == "" {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
hardForkNumber, exist := b.nameForkMap[forkName]
|
||||
if !exist {
|
||||
return 0, ErrHardForkName
|
||||
}
|
||||
|
||||
return hardForkNumber, nil
|
||||
}
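getHardForkNumberByName keeps the coordinator compatible with provers that predate fork-aware requests: an empty fork name maps to height 0, and unknown names are rejected with ErrHardForkName. A small test could pin that behavior down; this is an editor's sketch of such a test (same package assumed), not a test present in this diff:

// Editor's sketch of a unit test for the fallback behavior; not part of the diff.
package provertask

import (
	"errors"
	"testing"
)

func TestGetHardForkNumberByName(t *testing.T) {
	b := &BaseProverTask{nameForkMap: map[string]uint64{"bernoulli": 100}}

	// Legacy provers send no fork name; the coordinator treats that as height 0.
	if h, err := b.getHardForkNumberByName(""); err != nil || h != 0 {
		t.Fatalf("want (0, nil) for empty fork name, got (%d, %v)", h, err)
	}

	if _, err := b.getHardForkNumberByName("bernoulli"); err != nil {
		t.Fatalf("known fork name should resolve: %v", err)
	}

	if _, err := b.getHardForkNumberByName("unknown"); !errors.Is(err, ErrHardForkName) {
		t.Fatalf("want ErrHardForkName for unknown fork, got %v", err)
	}
}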
|
||||
|
||||
var (
|
||||
getTaskCounterInitOnce sync.Once
|
||||
getTaskCounterVec *prometheus.CounterVec = nil
|
||||
)
|
||||
|
||||
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
|
||||
getTaskCounterInitOnce.Do(func() {
|
||||
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "coordinator_get_task_count",
|
||||
Help: "Multi dimensions get task counter.",
|
||||
}, []string{"task_type",
|
||||
coordinatorType.LabelProverName,
|
||||
coordinatorType.LabelProverPublicKey,
|
||||
coordinatorType.LabelProverVersion})
|
||||
})
|
||||
|
||||
return getTaskCounterVec.MustCurryWith(prometheus.Labels{"task_type": taskType})
|
||||
}
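newGetTaskCounterVec is called from both the chunk and the batch prover task constructors, but a collector may only be registered once per Prometheus registry, so the vector is created under a sync.Once and each caller receives a view curried with its own task_type label. The same register-once-then-curry pattern in isolation (editor's sketch with example metric and label names, not the coordinator's real ones):

// Editor's sketch of the register-once-then-curry pattern used above; metric
// and label names here are examples only.
package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	once    sync.Once
	taskVec *prometheus.CounterVec
)

func taskCounter(reg prometheus.Registerer, taskType string) *prometheus.CounterVec {
	once.Do(func() { // registering the same collector name twice would panic
		taskVec = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
			Name: "example_get_task_count",
			Help: "Get task counter partitioned by task type and prover.",
		}, []string{"task_type", "prover_name"})
	})
	return taskVec.MustCurryWith(prometheus.Labels{"task_type": taskType})
}

func main() {
	reg := prometheus.NewRegistry()
	chunk := taskCounter(reg, "chunk")
	batch := taskCounter(reg, "batch")
	chunk.WithLabelValues("prover-a").Inc()
	batch.WithLabelValues("prover-b").Inc()
	fmt.Println("both curried views share one registered vector")
}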
|
||||
|
||||
@@ -134,18 +134,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
|
||||
if len(pv) == 0 {
|
||||
return fmt.Errorf("get ProverVersion from context failed")
|
||||
}
|
||||
hardForkName := ctx.GetString(coordinatorType.HardForkName)
|
||||
|
||||
var proverTask *orm.ProverTask
|
||||
var err error
|
||||
if proofParameter.UUID != "" {
|
||||
proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx, proofParameter.UUID, pk)
|
||||
proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk)
|
||||
if proverTask == nil || err != nil {
|
||||
log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofMsg.ID, "error", err)
|
||||
return ErrValidatorFailureProverTaskEmpty
|
||||
}
|
||||
} else {
|
||||
// TODO When prover all have upgrade, need delete this logic
|
||||
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx, proofMsg.Type, proofMsg.ID, pk, pv)
|
||||
proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx.Copy(), proofMsg.Type, proofMsg.ID, pk, pv)
|
||||
if proverTask == nil || err != nil {
|
||||
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
|
||||
return ErrValidatorFailureProverTaskEmpty
|
||||
@@ -156,29 +157,28 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
|
||||
proofTimeSec := uint64(proofTime.Seconds())
|
||||
|
||||
log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
|
||||
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
|
||||
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)
|
||||
|
||||
if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
|
||||
if err = m.validator(ctx.Copy(), proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.verifierTotal.WithLabelValues(pv).Inc()
|
||||
|
||||
var success bool
|
||||
success := true
|
||||
var verifyErr error
|
||||
if proofMsg.Type == message.ProofTypeChunk {
|
||||
success, verifyErr = m.verifier.VerifyChunkProof(proofMsg.ChunkProof)
|
||||
} else if proofMsg.Type == message.ProofTypeBatch {
|
||||
success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof)
|
||||
// only verify batch proof. chunk proof verifier have been disabled after Bernoulli
|
||||
if proofMsg.Type == message.ProofTypeBatch {
|
||||
success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName)
|
||||
}
|
||||
|
||||
if verifyErr != nil || !success {
|
||||
m.verifierFailureTotal.WithLabelValues(pv).Inc()
|
||||
|
||||
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
|
||||
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
|
||||
|
||||
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
|
||||
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
"prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
|
||||
if verifyErr != nil {
|
||||
return ErrValidatorFailureVerifiedFailed
|
||||
@@ -189,12 +189,12 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
|
||||
m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())
|
||||
|
||||
log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
|
||||
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
|
||||
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)
|
||||
|
||||
if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
|
||||
if err := m.closeProofTask(ctx.Copy(), proverTask, proofMsg, proofTimeSec); err != nil {
|
||||
m.proofSubmitFailure.Inc()
|
||||
|
||||
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
|
||||
m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
|
||||
|
||||
return ErrCoordinatorInternalFailure
|
||||
}
|
||||
@@ -203,17 +203,17 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
|
||||
}
|
||||
|
||||
func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, chunkHash string) error {
|
||||
batchHash, err := m.chunkOrm.GetChunkBatchHash(ctx, chunkHash)
|
||||
batch, err := m.chunkOrm.GetChunkByHash(ctx, chunkHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batchHash)
|
||||
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batch.BatchHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if allReady {
|
||||
err := m.batchOrm.UpdateChunkProofsStatusByBatchHash(ctx, batchHash, types.ChunkProofsStatusReady)
|
||||
err := m.batchOrm.UpdateChunkProofsStatusByBatchHash(ctx, batch.BatchHash, types.ChunkProofsStatusReady)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -221,7 +221,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) {
|
||||
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
m.validateFailureTotal.Inc()
|
||||
@@ -240,7 +240,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
|
||||
"cannot submit valid proof for a prover task twice",
|
||||
"taskType", proverTask.TaskType, "hash", proofMsg.ID,
|
||||
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
|
||||
"proverPublicKey", proverTask.ProverPublicKey,
|
||||
"proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName,
|
||||
)
|
||||
return ErrValidatorFailureProverTaskCannotSubmitTwice
|
||||
}
|
||||
@@ -259,7 +259,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
|
||||
log.Info("proof generated by prover failed",
|
||||
"taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
|
||||
"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
|
||||
"failureMessage", failureMsg)
|
||||
"failureMessage", failureMsg, "forkName", forkName)
|
||||
return ErrValidatorFailureProofMsgStatusNotOk
|
||||
}
|
||||
|
||||
@@ -267,13 +267,13 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
|
||||
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
|
||||
m.validateFailureProverTaskTimeout.Inc()
|
||||
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
|
||||
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
|
||||
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName)
|
||||
return ErrValidatorFailureProofTimeout
|
||||
}
|
||||
|
||||
// store the proof to prover task
|
||||
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
|
||||
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
|
||||
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName,
|
||||
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
|
||||
}
|
||||
|
||||
@@ -281,7 +281,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
|
||||
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
|
||||
m.validateFailureProverTaskHaveVerifier.Inc()
|
||||
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
|
||||
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
|
||||
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName)
|
||||
return ErrValidatorFailureTaskHaveVerifiedSuccess
|
||||
}
|
||||
return nil
|
||||
|
||||
BIN  coordinator/internal/logic/verifier/legacy_vk/agg_vk.vkey    (new file, binary file not shown)
BIN  coordinator/internal/logic/verifier/legacy_vk/chunk_vk.vkey  (new file, binary file not shown)
@@ -9,8 +9,26 @@ import (
|
||||
)
|
||||
|
||||
// NewVerifier Sets up a mock verifier.
|
||||
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
|
||||
return &Verifier{}, nil
|
||||
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
|
||||
batchVKMap := map[string]string{
|
||||
"shanghai": "",
|
||||
"bernoulli": "",
|
||||
"london": "",
|
||||
"istanbul": "",
|
||||
"homestead": "",
|
||||
"eip155": "",
|
||||
}
|
||||
chunkVKMap := map[string]string{
|
||||
"shanghai": "",
|
||||
"bernoulli": "",
|
||||
"london": "",
|
||||
"istanbul": "",
|
||||
"homestead": "",
|
||||
"eip155": "",
|
||||
}
|
||||
batchVKMap[cfg.ForkName] = ""
|
||||
chunkVKMap[cfg.ForkName] = ""
|
||||
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
|
||||
}
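The mock verifier now pre-populates its VK maps for a fixed set of fork names plus the configured fork, so fork-keyed lookups elsewhere in the coordinator succeed in mock mode. A minimal check of that behavior (editor's sketch assuming the mock, non-FFI build of this package; this test does not exist in the repository):

// Editor's sketch: the mock verifier should expose VK entries for the
// configured fork and the pre-populated fork names. Not a test from the repo.
package verifier

import (
	"testing"

	"scroll-tech/coordinator/internal/config"
)

func TestMockVerifierForkVKMaps(t *testing.T) {
	cfg := &config.VerifierConfig{ForkName: "bernoulli", MockMode: true}
	v, err := NewVerifier(cfg)
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := v.BatchVKMap["bernoulli"]; !ok {
		t.Fatal("expected a batch VK entry for the configured fork")
	}
	if _, ok := v.ChunkVKMap["shanghai"]; !ok {
		t.Fatal("expected the pre-populated shanghai chunk VK entry")
	}
}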
|
||||
|
||||
// VerifyChunkProof return a mock verification result for a ChunkProof.
|
||||
@@ -22,7 +40,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
|
||||
}
|
||||
|
||||
// VerifyBatchProof return a mock verification result for a BatchProof.
|
||||
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
|
||||
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
|
||||
if string(proof.Proof) == InvalidTestProof {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ const InvalidTestProof = "this is a invalid proof"
|
||||
|
||||
// Verifier represents a rust ffi to a halo2 verifier.
|
||||
type Verifier struct {
|
||||
cfg *config.VerifierConfig
|
||||
BatchVK string
|
||||
ChunkVK string
|
||||
cfg *config.VerifierConfig
|
||||
ChunkVKMap map[string]string
|
||||
BatchVKMap map[string]string
|
||||
}
|
||||
|
||||
@@ -11,9 +11,11 @@ package verifier
|
||||
import "C" //nolint:typecheck
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"unsafe"
|
||||
@@ -28,7 +30,26 @@ import (
|
||||
// NewVerifier Sets up a rust ffi to call verify.
|
||||
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
|
||||
if cfg.MockMode {
|
||||
return &Verifier{cfg: cfg}, nil
|
||||
batchVKMap := map[string]string{
|
||||
"shanghai": "",
|
||||
"bernoulli": "",
|
||||
"london": "",
|
||||
"istanbul": "",
|
||||
"homestead": "",
|
||||
"eip155": "",
|
||||
}
|
||||
chunkVKMap := map[string]string{
|
||||
"shanghai": "",
|
||||
"bernoulli": "",
|
||||
"london": "",
|
||||
"istanbul": "",
|
||||
"homestead": "",
|
||||
"eip155": "",
|
||||
}
|
||||
|
||||
batchVKMap[cfg.ForkName] = ""
|
||||
chunkVKMap[cfg.ForkName] = ""
|
||||
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
|
||||
}
|
||||
paramsPathStr := C.CString(cfg.ParamsPath)
|
||||
assetsPathStr := C.CString(cfg.AssetsPath)
|
||||
@@ -40,25 +61,31 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
|
||||
C.init_batch_verifier(paramsPathStr, assetsPathStr)
|
||||
C.init_chunk_verifier(paramsPathStr, assetsPathStr)
|
||||
|
||||
batchVK, err := readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
|
||||
v := &Verifier{
|
||||
cfg: cfg,
|
||||
ChunkVKMap: make(map[string]string),
|
||||
BatchVKMap: make(map[string]string),
|
||||
}
|
||||
|
||||
batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
chunkVK, err := readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
|
||||
chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
v.BatchVKMap[cfg.ForkName] = batchVK
|
||||
v.ChunkVKMap[cfg.ForkName] = chunkVK
|
||||
|
||||
return &Verifier{
|
||||
cfg: cfg,
|
||||
BatchVK: batchVK,
|
||||
ChunkVK: chunkVK,
|
||||
}, nil
|
||||
if err := v.loadEmbedVK(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
|
||||
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
|
||||
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
|
||||
if v.cfg.MockMode {
|
||||
log.Info("Mock mode, batch verifier disabled")
|
||||
if string(proof.Proof) == InvalidTestProof {
|
||||
@@ -72,13 +99,15 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
log.Info("Start to verify batch proof", "forkName", forkName)
|
||||
proofStr := C.CString(string(buf))
|
||||
forkNameStr := C.CString(forkName)
|
||||
defer func() {
|
||||
C.free(unsafe.Pointer(proofStr))
|
||||
C.free(unsafe.Pointer(forkNameStr))
|
||||
}()
|
||||
|
||||
log.Info("Start to verify batch proof ...")
|
||||
verified := C.verify_batch_proof(proofStr)
|
||||
verified := C.verify_batch_proof(proofStr, forkNameStr)
|
||||
return verified != 0, nil
|
||||
}
|
||||
|
||||
@@ -107,7 +136,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
|
||||
return verified != 0, nil
|
||||
}
|
||||
|
||||
func readVK(filePat string) (string, error) {
|
||||
func (v *Verifier) readVK(filePat string) (string, error) {
|
||||
f, err := os.Open(filePat)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -118,3 +147,26 @@ func readVK(filePat string) (string, error) {
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(byt), nil
|
||||
}
|
||||
|
||||
//go:embed legacy_vk/*
|
||||
var legacyVKFS embed.FS
|
||||
|
||||
func (v *Verifier) loadEmbedVK() error {
|
||||
batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
|
||||
if err != nil {
|
||||
log.Error("load embed batch vk failure", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
|
||||
if err != nil {
|
||||
log.Error("load embed chunk vk failure", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
|
||||
v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
|
||||
v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
|
||||
v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
|
||||
return nil
|
||||
}
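loadEmbedVK ships the legacy verifying keys inside the binary with go:embed and registers them under the "shanghai" and empty fork names, so provers that predate the fork-aware protocol keep working even when the on-disk assets only cover the configured fork. The embedding mechanism in isolation (editor's sketch; the embedded path here is hypothetical, not a file in this repository):

// Editor's sketch of the go:embed mechanism used by loadEmbedVK; the embedded
// path "assets/example.vkey" is hypothetical.
package main

import (
	"embed"
	"encoding/base64"
	"fmt"
	"io/fs"
)

//go:embed assets/example.vkey
var assetFS embed.FS

func main() {
	raw, err := fs.ReadFile(assetFS, "assets/example.vkey")
	if err != nil {
		panic(err) // with go:embed, a missing file already fails at build time
	}
	// The coordinator stores VKs base64-encoded, keyed by fork name.
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
}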
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/verifier"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -34,7 +33,7 @@ func TestFFI(t *testing.T) {
|
||||
AssetsPath: *assetsPath,
|
||||
}
|
||||
|
||||
v, err := verifier.NewVerifier(cfg)
|
||||
v, err := NewVerifier(cfg)
|
||||
as.NoError(err)
|
||||
|
||||
chunkProof1 := readChunkProof(*chunkProofPath1, as)
|
||||
@@ -50,7 +49,7 @@ func TestFFI(t *testing.T) {
|
||||
t.Log("Verified chunk proof 2")
|
||||
|
||||
batchProof := readBatchProof(*batchProofPath, as)
|
||||
batchOk, err := v.VerifyBatchProof(batchProof)
|
||||
batchOk, err := v.VerifyBatchProof(batchProof, "bernoulli")
|
||||
as.NoError(err)
|
||||
as.True(batchOk)
|
||||
t.Log("Verified batch proof")
|
||||
|
||||
@@ -24,6 +24,7 @@ type Batch struct {
|
||||
// batch
|
||||
Index uint64 `json:"index" gorm:"column:index"`
|
||||
Hash string `json:"hash" gorm:"column:hash"`
|
||||
DataHash string `json:"data_hash" gorm:"column:data_hash"`
|
||||
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
|
||||
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
|
||||
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
|
||||
@@ -54,6 +55,10 @@ type Batch struct {
|
||||
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
|
||||
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
|
||||
|
||||
// blob
|
||||
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
|
||||
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
|
||||
|
||||
// metadata
|
||||
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
|
||||
@@ -72,42 +77,34 @@ func (*Batch) TableName() string {
|
||||
|
||||
// GetUnassignedBatch retrieves unassigned batch based on the specified limit.
|
||||
// The returned batch are sorted in ascending order by their index.
|
||||
func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
|
||||
|
||||
func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
|
||||
var batch Batch
|
||||
err := db.First(&batch).Error
|
||||
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
|
||||
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
|
||||
err := db.Raw(sql).Scan(&batch).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Batch.GetUnassignedBatches error: %w", err)
|
||||
return nil, fmt.Errorf("Batch.GetUnassignedBatch error: %w", err)
|
||||
}
|
||||
if batch.Hash == "" {
|
||||
return nil, nil
|
||||
}
|
||||
return &batch, nil
|
||||
}
|
||||
|
||||
// GetAssignedBatch retrieves assigned batch based on the specified limit.
|
||||
// The returned batch are sorted in ascending order by their index.
|
||||
func (o *Batch) GetAssignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
|
||||
|
||||
func (o *Batch) GetAssignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
|
||||
var batch Batch
|
||||
err := db.First(&batch).Error
|
||||
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
|
||||
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
|
||||
err := db.Raw(sql).Scan(&batch).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Batch.GetAssignedBatches error: %w", err)
|
||||
return nil, fmt.Errorf("Batch.GetAssignedBatch error: %w", err)
|
||||
}
|
||||
if batch.Hash == "" {
|
||||
return nil, nil
|
||||
}
|
||||
return &batch, nil
|
||||
}
|
||||
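Note: both batch lookups above swap chained GORM conditions for a single raw SQL statement so that a fork-specific chunk-index window (start_chunk_index >= X AND end_chunk_index < Y) can be applied in one query; every interpolated value is an integer status or index, which is why fmt.Sprintf rather than bind parameters is tolerable here. A hypothetical caller sketch against the new signature (the window values would come from the coordinator's fork bookkeeping, which is not part of this hunk):

	batch, err := batchOrm.GetUnassignedBatch(ctx, startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
	if err != nil {
		return err
	}
	if batch == nil {
		// no provable batch inside this fork's window — surfaced to provers as
		// "get empty prover task" rather than as an error
		return nil
	}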
@@ -256,6 +253,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
|
||||
newBatch := Batch{
|
||||
Index: batch.Index,
|
||||
Hash: daBatch.Hash().Hex(),
|
||||
DataHash: daBatch.DataHash.Hex(),
|
||||
StartChunkHash: startDAChunkHash.Hex(),
|
||||
StartChunkIndex: startChunkIndex,
|
||||
EndChunkHash: endDAChunkHash.Hex(),
|
||||
@@ -270,6 +268,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
|
||||
ActiveAttempts: 0,
|
||||
RollupStatus: int16(types.RollupPending),
|
||||
OracleStatus: int16(types.GasOraclePending),
|
||||
BlobDataProof: nil, // using mock value because this piece of codes is only used in unit tests
|
||||
BlobSize: 0, // using mock value because this piece of codes is only used in unit tests
|
||||
}
|
||||
|
||||
db := o.db
|
||||
|
||||
@@ -48,6 +48,10 @@ type Chunk struct {
|
||||
// batch
|
||||
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
|
||||
|
||||
// blob
|
||||
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
|
||||
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
|
||||
|
||||
// metadata
|
||||
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
|
||||
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
|
||||
@@ -70,44 +74,34 @@ func (*Chunk) TableName() string {
|
||||
|
||||
// GetUnassignedChunk retrieves unassigned chunk based on the specified limit.
|
||||
// The returned chunks are sorted in ascending order by their index.
|
||||
func (o *Chunk) GetUnassignedChunk(ctx context.Context, height int, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("end_block_number <= ?", height)
|
||||
|
||||
func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
|
||||
var chunk Chunk
|
||||
err := db.First(&chunk).Error
|
||||
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
|
||||
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
|
||||
err := db.Raw(sql).Scan(&chunk).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Chunk.GetUnassignedChunks error: %w", err)
|
||||
return nil, fmt.Errorf("Chunk.GetUnassignedChunk error: %w", err)
|
||||
}
|
||||
if chunk.Hash == "" {
|
||||
return nil, nil
|
||||
}
|
||||
return &chunk, nil
|
||||
}
|
||||
|
||||
// GetAssignedChunk retrieves assigned chunk based on the specified limit.
|
||||
// The returned chunks are sorted in ascending order by their index.
|
||||
func (o *Chunk) GetAssignedChunk(ctx context.Context, height int, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("end_block_number <= ?", height)
|
||||
|
||||
func (o *Chunk) GetAssignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
|
||||
var chunk Chunk
|
||||
err := db.First(&chunk).Error
|
||||
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
|
||||
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
|
||||
err := db.Raw(sql).Scan(&chunk).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Chunk.GetAssignedChunks error: %w", err)
|
||||
return nil, fmt.Errorf("Chunk.GetAssignedChunk error: %w", err)
|
||||
}
|
||||
if chunk.Hash == "" {
|
||||
return nil, nil
|
||||
}
|
||||
return &chunk, nil
|
||||
}
|
||||
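Note: the chunk queries follow the same pattern but express the window in block numbers — a chunk qualifies when start_block_number >= fromBlockNum and end_block_number < toBlockNum, i.e. when it lies entirely inside the half-open range [fromBlockNum, toBlockNum). A worked example under the fixture used by the coordinator tests later in this diff (block 2 in chunk 1, block 3 in chunk 2): with fork activations istanbul = 2 and london = 3, a prover registered for "istanbul" is served from [2, 3) and can only be handed chunk 1, while a prover registered for "london" is served from block 3 upward and gets chunk 2. How the coordinator derives the exact bounds from the chain config is outside this hunk; the reading here relies only on the [from, to) predicate added above.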
@@ -196,18 +190,33 @@ func (o *Chunk) CheckIfBatchChunkProofsAreReady(ctx context.Context, batchHash s
|
||||
return count == 0, nil
|
||||
}
|
||||
|
||||
// GetChunkBatchHash retrieves the batchHash of a given chunk.
|
||||
func (o *Chunk) GetChunkBatchHash(ctx context.Context, chunkHash string) (string, error) {
|
||||
// GetChunkByHash retrieves the given chunk.
|
||||
func (o *Chunk) GetChunkByHash(ctx context.Context, chunkHash string) (*Chunk, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("hash = ?", chunkHash)
|
||||
db = db.Select("batch_hash")
|
||||
|
||||
var chunk Chunk
|
||||
if err := db.First(&chunk).Error; err != nil {
|
||||
return "", fmt.Errorf("Chunk.GetChunkBatchHash error: %w, chunk hash: %v", err, chunkHash)
|
||||
return nil, fmt.Errorf("Chunk.GetChunkByHash error: %w, chunk hash: %v", err, chunkHash)
|
||||
}
|
||||
return chunk.BatchHash, nil
|
||||
return &chunk, nil
|
||||
}
|
||||
|
||||
// GetChunkByStartBlockNumber retrieves the given chunk.
|
||||
func (o *Chunk) GetChunkByStartBlockNumber(ctx context.Context, startBlockNumber uint64) (*Chunk, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("start_block_number = ?", startBlockNumber)
|
||||
|
||||
var chunk Chunk
|
||||
if err := db.First(&chunk).Error; err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Chunk.GetChunkByStartBlockNumber error: %w, chunk start block number: %v", err, startBlockNumber)
|
||||
}
|
||||
return &chunk, nil
|
||||
}
|
||||
|
||||
// GetAttemptsByHash get chunk attempts by hash. Used by unit test
|
||||
@@ -295,6 +304,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
|
||||
ProvingStatus: int16(types.ProvingTaskUnassigned),
|
||||
TotalAttempts: 0,
|
||||
ActiveAttempts: 0,
|
||||
CrcMax: 0, // using mock value because this piece of codes is only used in unit tests
|
||||
BlobSize: 0, // using mock value because this piece of codes is only used in unit tests
|
||||
}
|
||||
|
||||
db := o.db
|
||||
|
||||
@@ -9,41 +9,36 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
)
|
||||
|
||||
var (
|
||||
base *docker.App
|
||||
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
db *gorm.DB
|
||||
proverTaskOrm *ProverTask
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
t := &testing.T{}
|
||||
setupEnv(t)
|
||||
defer tearDownEnv(t)
|
||||
defer func() {
|
||||
if testApps != nil {
|
||||
testApps.Free()
|
||||
}
|
||||
tearDownEnv(t)
|
||||
}()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
func setupEnv(t *testing.T) {
|
||||
base = docker.NewDockerApp()
|
||||
base.RunDBImage(t)
|
||||
testApps = testcontainers.NewTestcontainerApps()
|
||||
assert.NoError(t, testApps.StartPostgresContainer())
|
||||
|
||||
var err error
|
||||
db, err = database.InitDB(
|
||||
&database.Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
},
|
||||
)
|
||||
db, err = testApps.GetGormDBClient()
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
@@ -56,10 +51,11 @@ func tearDownEnv(t *testing.T) {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
sqlDB.Close()
|
||||
base.Free()
|
||||
}
|
||||
|
||||
func TestProverTaskOrm(t *testing.T) {
|
||||
setupEnv(t)
|
||||
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
@@ -9,6 +9,8 @@ const (
|
||||
ProverName = "prover_name"
|
||||
// ProverVersion the prover version for context
|
||||
ProverVersion = "prover_version"
|
||||
// HardForkName the fork name for context
|
||||
HardForkName = "hard_fork_name"
|
||||
)
|
||||
|
||||
// Message the login message struct
|
||||
@@ -16,6 +18,7 @@ type Message struct {
|
||||
Challenge string `form:"challenge" json:"challenge" binding:"required"`
|
||||
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
|
||||
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
|
||||
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
|
||||
}
|
||||
|
||||
// LoginParameter for /login api
|
||||
|
||||
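Note: with the optional HardForkName field (it has no binding:"required" tag), the prover login message gains a hard_fork_name entry while older provers can keep sending the legacy shape. An illustrative request body, mirroring the string that mockProver.login assembles later in this diff (all values are placeholders):

	{
	  "message": {
	    "challenge": "<token obtained from the challenge endpoint>",
	    "prover_name": "prover_test0",
	    "prover_version": "v4.1.98",
	    "hard_fork_name": "bernoulli"
	  },
	  "signature": "<signature produced by authMsg.SignWithKey>"
	}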
@@ -2,7 +2,7 @@ package types
|
||||
|
||||
// GetTaskParameter for ProverTasks request parameter
|
||||
type GetTaskParameter struct {
|
||||
ProverHeight int `form:"prover_height" json:"prover_height"`
|
||||
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
|
||||
TaskType int `form:"task_type" json:"task_type"`
|
||||
VK string `form:"vk" json:"vk"`
|
||||
}
|
||||
|
||||
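Note: the matching body POSTed to /coordinator/v1/get_task (as seen in the mock prover below) after ProverHeight changes to uint64 — a sketch with placeholder values; the numeric task_type values are defined by the message package's ProofType constants and are not repeated here:

	{
	  "prover_height": 1024,
	  "task_type": 1,
	  "vk": "<verifying key string reported by the prover>"
	}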
10
coordinator/internal/types/metric.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package types
|
||||
|
||||
var (
|
||||
// LabelProverName label name for prover name; common label name using in prometheus metrics, same rule applies to below.
|
||||
LabelProverName = "prover_name"
|
||||
// LabelProverPublicKey label name for prover public key
|
||||
LabelProverPublicKey = "prover_pubkey"
|
||||
// LabelProverVersion label name for prover version
|
||||
LabelProverVersion = "prover_version"
|
||||
)
|
||||
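Note: these shared label constants are intended for the coordinator's prometheus metrics. A minimal usage sketch — the metric name, help text and promauto registration are illustrative assumptions; only the three label constants come from the new file above:

	import (
		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promauto"

		"scroll-tech/coordinator/internal/types"
	)

	var getTaskTotal = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "coordinator_get_task_total", // hypothetical metric name
		Help: "Prover tasks handed out, labelled per prover.",
	}, []string{types.LabelProverName, types.LabelProverPublicKey, types.LabelProverVersion})

	// in the get_task handler:
	getTaskTotal.WithLabelValues(proverName, proverPublicKey, proverVersion).Inc()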
@@ -15,13 +15,13 @@ import (
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/encoding"
|
||||
"scroll-tech/common/types/message"
|
||||
@@ -34,11 +34,17 @@ import (
|
||||
"scroll-tech/coordinator/internal/route"
|
||||
)
|
||||
|
||||
var (
|
||||
dbCfg *database.Config
|
||||
conf *config.Config
|
||||
const (
|
||||
forkNumberFour = 4
|
||||
forkNumberThree = 3
|
||||
forkNumberTwo = 2
|
||||
forkNumberOne = 1
|
||||
)
|
||||
|
||||
base *docker.App
|
||||
var (
|
||||
conf *config.Config
|
||||
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
|
||||
db *gorm.DB
|
||||
l2BlockOrm *orm.L2Block
|
||||
@@ -49,20 +55,25 @@ var (
|
||||
|
||||
block1 *encoding.Block
|
||||
block2 *encoding.Block
|
||||
chunk *encoding.Chunk
|
||||
batch *encoding.Batch
|
||||
|
||||
chunk *encoding.Chunk
|
||||
hardForkChunk1 *encoding.Chunk
|
||||
hardForkChunk2 *encoding.Chunk
|
||||
|
||||
batch *encoding.Batch
|
||||
hardForkBatch1 *encoding.Batch
|
||||
hardForkBatch2 *encoding.Batch
|
||||
|
||||
tokenTimeout int
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
base = docker.NewDockerApp()
|
||||
defer func() {
|
||||
if testApps != nil {
|
||||
testApps.Free()
|
||||
}
|
||||
}()
|
||||
m.Run()
|
||||
base.Free()
|
||||
}
|
||||
|
||||
func randomURL() string {
|
||||
@@ -70,9 +81,10 @@ func randomURL() string {
|
||||
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
|
||||
}
|
||||
|
||||
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
|
||||
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, nameForkMap map[string]int64) (*cron.Collector, *http.Server) {
|
||||
var err error
|
||||
db, err = database.InitDB(dbCfg)
|
||||
db, err = testApps.GetGormDBClient()
|
||||
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
@@ -84,8 +96,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
|
||||
ChainID: 111,
|
||||
},
|
||||
ProverManager: &config.ProverManager{
|
||||
ProversPerSession: proversPerSession,
|
||||
Verifier: &config.VerifierConfig{MockMode: true},
|
||||
ProversPerSession: proversPerSession,
|
||||
Verifier: &config.VerifierConfig{
|
||||
MockMode: true,
|
||||
},
|
||||
BatchCollectionTimeSec: 10,
|
||||
ChunkCollectionTimeSec: 10,
|
||||
MaxVerifierWorkers: 10,
|
||||
@@ -98,10 +112,28 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
|
||||
},
|
||||
}
|
||||
|
||||
var chainConf params.ChainConfig
|
||||
for forkName, forkNumber := range nameForkMap {
|
||||
switch forkName {
|
||||
case "shanghai":
|
||||
chainConf.ShanghaiBlock = big.NewInt(forkNumber)
|
||||
case "bernoulli":
|
||||
chainConf.BernoulliBlock = big.NewInt(forkNumber)
|
||||
case "london":
|
||||
chainConf.LondonBlock = big.NewInt(forkNumber)
|
||||
case "istanbul":
|
||||
chainConf.IstanbulBlock = big.NewInt(forkNumber)
|
||||
case "homestead":
|
||||
chainConf.HomesteadBlock = big.NewInt(forkNumber)
|
||||
case "eip155":
|
||||
chainConf.EIP155Block = big.NewInt(forkNumber)
|
||||
}
|
||||
}
|
||||
|
||||
proofCollector := cron.NewCollector(context.Background(), db, conf, nil)
|
||||
|
||||
router := gin.New()
|
||||
api.InitController(conf, db, nil)
|
||||
api.InitController(conf, &chainConf, db, nil)
|
||||
route.Route(router, conf, nil)
|
||||
srv := &http.Server{
|
||||
Addr: coordinatorURL,
|
||||
@@ -119,20 +151,18 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
|
||||
}
|
||||
|
||||
func setEnv(t *testing.T) {
|
||||
var err error
|
||||
|
||||
version.Version = "v4.1.98"
|
||||
|
||||
base = docker.NewDockerApp()
|
||||
base.RunDBImage(t)
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
log.Root().SetHandler(glogger)
|
||||
|
||||
dbCfg = &database.Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
}
|
||||
testApps = testcontainers.NewTestcontainerApps()
|
||||
assert.NoError(t, testApps.StartPostgresContainer())
|
||||
|
||||
var err error
|
||||
db, err = database.InitDB(dbCfg)
|
||||
db, err = testApps.GetGormDBClient()
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
@@ -157,14 +187,18 @@ func setEnv(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
chunk = &encoding.Chunk{Blocks: []*encoding.Block{block1, block2}}
|
||||
hardForkChunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}}
|
||||
hardForkChunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}}
|
||||
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk}}
|
||||
hardForkBatch1 = &encoding.Batch{Index: 1, Chunks: []*encoding.Chunk{hardForkChunk1}}
|
||||
hardForkBatch2 = &encoding.Batch{Index: 2, Chunks: []*encoding.Chunk{hardForkChunk2}}
|
||||
}
|
||||
|
||||
func TestApis(t *testing.T) {
|
||||
// Set up the test environment.
|
||||
base = docker.NewDockerApp()
|
||||
setEnv(t)
|
||||
|
||||
t.Run("TestHandshake", testHandshake)
|
||||
@@ -175,17 +209,13 @@ func TestApis(t *testing.T) {
|
||||
t.Run("TestInvalidProof", testInvalidProof)
|
||||
t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
|
||||
t.Run("TestTimeoutProof", testTimeoutProof)
|
||||
|
||||
// Teardown
|
||||
t.Cleanup(func() {
|
||||
base.Free()
|
||||
})
|
||||
t.Run("TestHardFork", testHardForkAssignTask)
|
||||
}
|
||||
|
||||
func testHandshake(t *testing.T) {
|
||||
// Setup coordinator and http server.
|
||||
coordinatorURL := randomURL()
|
||||
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
|
||||
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
|
||||
defer func() {
|
||||
proofCollector.Stop()
|
||||
assert.NoError(t, httpHandler.Shutdown(context.Background()))
|
||||
@@ -198,7 +228,7 @@ func testHandshake(t *testing.T) {
|
||||
func testFailedHandshake(t *testing.T) {
|
||||
// Setup coordinator and http server.
|
||||
coordinatorURL := randomURL()
|
||||
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
|
||||
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
|
||||
defer func() {
|
||||
proofCollector.Stop()
|
||||
}()
|
||||
@@ -216,7 +246,7 @@ func testFailedHandshake(t *testing.T) {
|
||||
|
||||
func testGetTaskBlocked(t *testing.T) {
|
||||
coordinatorURL := randomURL()
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
|
||||
defer func() {
|
||||
collector.Stop()
|
||||
assert.NoError(t, httpHandler.Shutdown(context.Background()))
|
||||
@@ -226,18 +256,18 @@ func testGetTaskBlocked(t *testing.T) {
|
||||
assert.True(t, chunkProver.healthCheckSuccess(t))
|
||||
|
||||
batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, version.Version)
|
||||
assert.True(t, chunkProver.healthCheckSuccess(t))
|
||||
assert.True(t, batchProver.healthCheckSuccess(t))
|
||||
|
||||
err := proverBlockListOrm.InsertProverPublicKey(context.Background(), chunkProver.proverName, chunkProver.publicKey())
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
|
||||
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
|
||||
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
|
||||
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
|
||||
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
|
||||
|
||||
expectedErr = fmt.Errorf("get empty prover task")
|
||||
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
|
||||
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
|
||||
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
|
||||
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
|
||||
|
||||
@@ -248,19 +278,19 @@ func testGetTaskBlocked(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
expectedErr = fmt.Errorf("get empty prover task")
|
||||
code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
|
||||
code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
|
||||
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
|
||||
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
|
||||
|
||||
expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
|
||||
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
|
||||
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
|
||||
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
|
||||
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
|
||||
}
|
||||
|
||||
func testOutdatedProverVersion(t *testing.T) {
|
||||
coordinatorURL := randomURL()
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"homestead": forkNumberOne})
|
||||
defer func() {
|
||||
collector.Stop()
|
||||
assert.NoError(t, httpHandler.Shutdown(context.Background()))
|
||||
@@ -273,19 +303,251 @@ func testOutdatedProverVersion(t *testing.T) {
|
||||
assert.True(t, chunkProver.healthCheckSuccess(t))
|
||||
|
||||
expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion)
|
||||
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
|
||||
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
|
||||
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
|
||||
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
|
||||
|
||||
expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion)
|
||||
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
|
||||
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
|
||||
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
|
||||
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
|
||||
}
|
||||
|
||||
func testHardForkAssignTask(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
proofType message.ProofType
|
||||
forkNumbers map[string]int64
|
||||
proverForkNames []string
|
||||
exceptTaskNumber int
|
||||
exceptGetTaskErrCodes []int
|
||||
exceptGetTaskErrMsgs []string
|
||||
}{
|
||||
{ // hard fork 4, prover 4 block [2-3]
|
||||
name: "noTaskForkChunkProverVersionLargeOrEqualThanHardFork",
|
||||
proofType: message.ProofTypeChunk,
|
||||
forkNumbers: map[string]int64{"bernoulli": forkNumberFour},
|
||||
exceptTaskNumber: 0,
|
||||
proverForkNames: []string{"bernoulli", "bernoulli"},
|
||||
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
|
||||
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
|
||||
},
|
||||
{
|
||||
name: "noTaskForkBatchProverVersionLargeOrEqualThanHardFork",
|
||||
proofType: message.ProofTypeBatch,
|
||||
forkNumbers: map[string]int64{"bernoulli": forkNumberFour},
|
||||
exceptTaskNumber: 0,
|
||||
proverForkNames: []string{"bernoulli", "bernoulli"},
|
||||
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
|
||||
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
|
||||
},
|
||||
{ // hard fork 1, prover 1 block [2-3]
|
||||
name: "noTaskForkChunkProverVersionLessThanHardFork",
|
||||
proofType: message.ProofTypeChunk,
|
||||
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "homestead": forkNumberOne},
|
||||
exceptTaskNumber: 0,
|
||||
proverForkNames: []string{"homestead", "homestead"},
|
||||
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
|
||||
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
|
||||
},
|
||||
{
|
||||
name: "noTaskForkBatchProverVersionLessThanHardFork",
|
||||
proofType: message.ProofTypeBatch,
|
||||
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "homestead": forkNumberOne},
|
||||
exceptTaskNumber: 0,
|
||||
proverForkNames: []string{"homestead", "homestead"},
|
||||
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
|
||||
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
|
||||
},
|
||||
{
|
||||
name: "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0",
|
||||
proofType: message.ProofTypeBatch,
|
||||
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
|
||||
exceptTaskNumber: 0,
|
||||
proverForkNames: []string{"", ""},
|
||||
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
|
||||
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
|
||||
},
|
||||
{ // hard fork 3, prover 3 block [2-3]
|
||||
name: "oneTaskForkChunkProverVersionLargeOrEqualThanHardFork",
|
||||
proofType: message.ProofTypeChunk,
|
||||
forkNumbers: map[string]int64{"london": forkNumberThree},
|
||||
exceptTaskNumber: 1,
|
||||
proverForkNames: []string{"london", "london"},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
|
||||
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
|
||||
},
|
||||
{
|
||||
name: "oneTaskForkBatchProverVersionLargeOrEqualThanHardFork",
|
||||
proofType: message.ProofTypeBatch,
|
||||
forkNumbers: map[string]int64{"london": forkNumberThree},
|
||||
exceptTaskNumber: 1,
|
||||
proverForkNames: []string{"london", "london"},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
|
||||
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
|
||||
},
|
||||
{ // hard fork 2, prover 2 block [2-3]
|
||||
name: "oneTaskForkChunkProverVersionLessThanHardFork",
|
||||
proofType: message.ProofTypeChunk,
|
||||
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
|
||||
exceptTaskNumber: 1,
|
||||
proverForkNames: []string{"istanbul", "istanbul"},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
|
||||
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
|
||||
},
|
||||
{
|
||||
name: "oneTaskForkBatchProverVersionLessThanHardFork",
|
||||
proofType: message.ProofTypeBatch,
|
||||
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
|
||||
exceptTaskNumber: 1,
|
||||
proverForkNames: []string{"istanbul", "istanbul"},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
|
||||
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
|
||||
},
|
||||
{ // hard fork 2, prover 2 block [2-3]
|
||||
name: "twoTaskForkChunkProverVersionLargeOrEqualThanHardFork",
|
||||
proofType: message.ProofTypeChunk,
|
||||
forkNumbers: map[string]int64{"istanbul": forkNumberTwo},
|
||||
exceptTaskNumber: 2,
|
||||
proverForkNames: []string{"istanbul", "istanbul"},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.Success},
|
||||
exceptGetTaskErrMsgs: []string{"", ""},
|
||||
},
|
||||
{
|
||||
name: "twoTaskForkBatchProverVersionLargeOrEqualThanHardFork",
|
||||
proofType: message.ProofTypeBatch,
|
||||
forkNumbers: map[string]int64{"istanbul": forkNumberTwo},
|
||||
exceptTaskNumber: 2,
|
||||
proverForkNames: []string{"istanbul", "istanbul"},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.Success},
|
||||
exceptGetTaskErrMsgs: []string{"", ""},
|
||||
},
|
||||
{ // hard fork 4, prover 3 block [2-3]
|
||||
name: "twoTaskForkChunkProverVersionLessThanHardFork",
|
||||
proofType: message.ProofTypeChunk,
|
||||
forkNumbers: map[string]int64{"bernoulli": forkNumberFour, "istanbul": forkNumberTwo},
|
||||
exceptTaskNumber: 2,
|
||||
proverForkNames: []string{"istanbul", "istanbul"},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.Success},
|
||||
exceptGetTaskErrMsgs: []string{"", ""},
|
||||
},
|
||||
{ // hard fork 3, prover1:2 prover2:3 block [2-3]
|
||||
name: "twoTaskForkChunkProverVersionMiddleHardFork",
|
||||
proofType: message.ProofTypeChunk,
|
||||
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
|
||||
exceptTaskNumber: 2,
|
||||
proverForkNames: []string{"istanbul", "london"},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.Success},
|
||||
exceptGetTaskErrMsgs: []string{"", ""},
|
||||
},
|
||||
{
|
||||
name: "twoTaskForkBatchProverVersionMiddleHardFork",
|
||||
proofType: message.ProofTypeBatch,
|
||||
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
|
||||
exceptTaskNumber: 2,
|
||||
proverForkNames: []string{"istanbul", "london"},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.Success},
|
||||
exceptGetTaskErrMsgs: []string{"", ""},
|
||||
},
|
||||
{ // hard fork 3, prover1:2 prover2:3 block [2-3]
|
||||
name: "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0",
|
||||
proofType: message.ProofTypeChunk,
|
||||
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
|
||||
exceptTaskNumber: 2,
|
||||
proverForkNames: []string{"", "london"},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.Success},
|
||||
exceptGetTaskErrMsgs: []string{"", ""},
|
||||
},
|
||||
{
|
||||
name: "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0",
|
||||
proofType: message.ProofTypeBatch,
|
||||
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
|
||||
exceptTaskNumber: 2,
|
||||
proverForkNames: []string{"", "london"},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.Success},
|
||||
exceptGetTaskErrMsgs: []string{"", ""},
|
||||
},
|
||||
{ // hard fork 2, prover 2 block [2-3]
|
||||
name: "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0",
|
||||
proofType: message.ProofTypeChunk,
|
||||
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
|
||||
exceptTaskNumber: 1,
|
||||
proverForkNames: []string{"", ""},
|
||||
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
|
||||
exceptGetTaskErrMsgs: []string{"", "get empty prover task"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
coordinatorURL := randomURL()
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, tt.forkNumbers)
|
||||
defer func() {
|
||||
collector.Stop()
|
||||
assert.NoError(t, httpHandler.Shutdown(context.Background()))
|
||||
}()
|
||||
|
||||
chunkProof := &message.ChunkProof{
|
||||
StorageTrace: []byte("testStorageTrace"),
|
||||
Protocol: []byte("testProtocol"),
|
||||
Proof: []byte("testProof"),
|
||||
Instances: []byte("testInstance"),
|
||||
Vk: []byte("testVk"),
|
||||
ChunkInfo: nil,
|
||||
}
|
||||
|
||||
// the insert block number is 2 and 3
|
||||
// chunk1 batch1 contains block number 2
|
||||
// chunk2 batch2 contains block number 3
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
dbHardForkChunk1, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk1)
|
||||
assert.NoError(t, err)
|
||||
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 2, dbHardForkChunk1.Hash)
|
||||
assert.NoError(t, err)
|
||||
err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk1.Hash, chunkProof, types.ProvingTaskUnassigned, 1)
|
||||
assert.NoError(t, err)
|
||||
dbHardForkBatch1, err := batchOrm.InsertBatch(context.Background(), hardForkBatch1)
|
||||
assert.NoError(t, err)
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, dbHardForkBatch1.Hash)
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch1.Hash, types.ChunkProofsStatusReady)
|
||||
assert.NoError(t, err)
|
||||
|
||||
dbHardForkChunk2, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk2)
|
||||
assert.NoError(t, err)
|
||||
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 3, 100, dbHardForkChunk2.Hash)
|
||||
assert.NoError(t, err)
|
||||
err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk2.Hash, chunkProof, types.ProvingTaskUnassigned, 1)
|
||||
assert.NoError(t, err)
|
||||
dbHardForkBatch2, err := batchOrm.InsertBatch(context.Background(), hardForkBatch2)
|
||||
assert.NoError(t, err)
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 1, 1, dbHardForkBatch2.Hash)
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch2.Hash, types.ChunkProofsStatusReady)
|
||||
assert.NoError(t, err)
|
||||
|
||||
getTaskNumber := 0
|
||||
for i := 0; i < 2; i++ {
|
||||
mockProver := newMockProver(t, fmt.Sprintf("mock_prover_%d", i), coordinatorURL, tt.proofType, version.Version)
|
||||
proverTask, errCode, errMsg := mockProver.getProverTask(t, tt.proofType, tt.proverForkNames[i])
|
||||
assert.Equal(t, tt.exceptGetTaskErrCodes[i], errCode)
|
||||
assert.Equal(t, tt.exceptGetTaskErrMsgs[i], errMsg)
|
||||
if errCode != types.Success {
|
||||
continue
|
||||
}
|
||||
getTaskNumber++
|
||||
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i])
|
||||
}
|
||||
assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
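Note: the expected task counts in the table above follow from the fixture built inside the loop — blocks 2 and 3 are inserted, chunk/batch 1 covers block 2 and chunk/batch 2 covers block 3. Reading two representative rows: with forkNumbers {"istanbul": 2, "london": 3} and the two provers registering as "istanbul" and "london", the first prover's window covers only block 2 (task 1) and the second prover's window starts at block 3 (task 2), so exceptTaskNumber is 2; with only {"bernoulli": 4}, both blocks sit below the sole fork boundary, neither prover's window matches, and both provers see "get empty prover task". The precise window bounds are computed by the coordinator from the chain config and are outside this diff, but the arithmetic is consistent with the [from, to) queries added to the chunk and batch ORMs earlier in this change.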
func testValidProof(t *testing.T) {
|
||||
coordinatorURL := randomURL()
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
|
||||
defer func() {
|
||||
collector.Stop()
|
||||
assert.NoError(t, httpHandler.Shutdown(context.Background()))
|
||||
@@ -311,16 +573,15 @@ func testValidProof(t *testing.T) {
|
||||
} else {
|
||||
proofType = message.ProofTypeBatch
|
||||
}
|
||||
|
||||
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
|
||||
|
||||
// only prover 0 & 1 submit valid proofs.
|
||||
proofStatus := generatedFailed
|
||||
if i <= 1 {
|
||||
proofStatus = verifiedSuccess
|
||||
}
|
||||
proverTask := provers[i].getProverTask(t, proofType)
|
||||
proofStatus := verifiedSuccess
|
||||
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
|
||||
assert.Equal(t, errCode, types.Success)
|
||||
assert.Equal(t, errMsg, "")
|
||||
assert.NotNil(t, proverTask)
|
||||
provers[i].submitProof(t, proverTask, proofStatus, types.Success)
|
||||
provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul")
|
||||
}
|
||||
|
||||
// verify proof status
|
||||
@@ -369,7 +630,7 @@ func testValidProof(t *testing.T) {
|
||||
func testInvalidProof(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
coordinatorURL := randomURL()
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
|
||||
defer func() {
|
||||
collector.Stop()
|
||||
assert.NoError(t, httpHandler.Shutdown(context.Background()))
|
||||
@@ -386,32 +647,21 @@ func testInvalidProof(t *testing.T) {
|
||||
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// create mock provers.
|
||||
provers := make([]*mockProver, 2)
|
||||
for i := 0; i < len(provers); i++ {
|
||||
var proofType message.ProofType
|
||||
if i%2 == 0 {
|
||||
proofType = message.ProofTypeChunk
|
||||
} else {
|
||||
proofType = message.ProofTypeBatch
|
||||
}
|
||||
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
|
||||
proverTask := provers[i].getProverTask(t, proofType)
|
||||
assert.NotNil(t, proverTask)
|
||||
provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
|
||||
}
|
||||
proofType := message.ProofTypeBatch
|
||||
provingStatus := verifiedFailed
|
||||
expectErrCode := types.ErrCoordinatorHandleZkProofFailure
|
||||
prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version)
|
||||
proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul")
|
||||
assert.NotNil(t, proverTask)
|
||||
assert.Equal(t, errCode, types.Success)
|
||||
assert.Equal(t, errMsg, "")
|
||||
prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul")
|
||||
|
||||
// verify proof status
|
||||
var (
|
||||
tick = time.Tick(1500 * time.Millisecond)
|
||||
tickStop = time.Tick(time.Minute)
|
||||
)
|
||||
|
||||
var (
|
||||
chunkProofStatus types.ProvingStatus
|
||||
tick = time.Tick(1500 * time.Millisecond)
|
||||
tickStop = time.Tick(time.Minute)
|
||||
batchProofStatus types.ProvingStatus
|
||||
chunkActiveAttempts int16
|
||||
chunkMaxAttempts int16
|
||||
batchActiveAttempts int16
|
||||
batchMaxAttempts int16
|
||||
)
|
||||
@@ -419,24 +669,17 @@ func testInvalidProof(t *testing.T) {
|
||||
for {
|
||||
select {
|
||||
case <-tick:
|
||||
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
|
||||
assert.NoError(t, err)
|
||||
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
|
||||
if batchProofStatus == types.ProvingTaskAssigned {
|
||||
return
|
||||
}
|
||||
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, int(chunkMaxAttempts))
|
||||
assert.Equal(t, 0, int(chunkActiveAttempts))
|
||||
|
||||
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, int(batchMaxAttempts))
|
||||
assert.Equal(t, 0, int(batchActiveAttempts))
|
||||
case <-tickStop:
|
||||
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
|
||||
t.Error("failed to check proof status", "batchProofStatus", batchProofStatus.String())
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -445,7 +688,7 @@ func testInvalidProof(t *testing.T) {
|
||||
func testProofGeneratedFailed(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
coordinatorURL := randomURL()
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
|
||||
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
|
||||
defer func() {
|
||||
collector.Stop()
|
||||
assert.NoError(t, httpHandler.Shutdown(context.Background()))
|
||||
@@ -472,9 +715,11 @@ func testProofGeneratedFailed(t *testing.T) {
|
||||
proofType = message.ProofTypeBatch
|
||||
}
|
||||
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
|
||||
proverTask := provers[i].getProverTask(t, proofType)
|
||||
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
|
||||
assert.NotNil(t, proverTask)
|
||||
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure)
|
||||
assert.Equal(t, errCode, types.Success)
|
||||
assert.Equal(t, errMsg, "")
|
||||
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul")
|
||||
}
|
||||
|
||||
// verify proof status
|
||||
@@ -532,7 +777,7 @@ func testProofGeneratedFailed(t *testing.T) {
|
||||
func testTimeoutProof(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
coordinatorURL := randomURL()
|
||||
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
|
||||
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL, map[string]int64{"istanbul": forkNumberTwo})
|
||||
defer func() {
|
||||
collector.Stop()
|
||||
assert.NoError(t, httpHandler.Shutdown(context.Background()))
|
||||
@@ -558,12 +803,16 @@ func testTimeoutProof(t *testing.T) {
|
||||
|
||||
// create first chunk & batch mock prover, that will not send any proof.
|
||||
chunkProver1 := newMockProver(t, "prover_test"+strconv.Itoa(0), coordinatorURL, message.ProofTypeChunk, version.Version)
|
||||
proverChunkTask := chunkProver1.getProverTask(t, message.ProofTypeChunk)
|
||||
proverChunkTask, errChunkCode, errChunkMsg := chunkProver1.getProverTask(t, message.ProofTypeChunk, "istanbul")
|
||||
assert.NotNil(t, proverChunkTask)
|
||||
assert.Equal(t, errChunkCode, types.Success)
|
||||
assert.Equal(t, errChunkMsg, "")
|
||||
|
||||
batchProver1 := newMockProver(t, "prover_test"+strconv.Itoa(1), coordinatorURL, message.ProofTypeBatch, version.Version)
|
||||
proverBatchTask := batchProver1.getProverTask(t, message.ProofTypeBatch)
|
||||
proverBatchTask, errBatchCode, errBatchMsg := batchProver1.getProverTask(t, message.ProofTypeBatch, "istanbul")
|
||||
assert.NotNil(t, proverBatchTask)
|
||||
assert.Equal(t, errBatchCode, types.Success)
|
||||
assert.Equal(t, errBatchMsg, "")
|
||||
|
||||
// verify proof status, it should be assigned, because prover didn't send any proof
|
||||
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
|
||||
@@ -589,14 +838,18 @@ func testTimeoutProof(t *testing.T) {
|
||||
|
||||
// create second mock prover, that will send valid proof.
|
||||
chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), coordinatorURL, message.ProofTypeChunk, version.Version)
|
||||
proverChunkTask2 := chunkProver2.getProverTask(t, message.ProofTypeChunk)
|
||||
proverChunkTask2, chunkTask2ErrCode, chunkTask2ErrMsg := chunkProver2.getProverTask(t, message.ProofTypeChunk, "istanbul")
|
||||
assert.NotNil(t, proverChunkTask2)
|
||||
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success)
|
||||
assert.Equal(t, chunkTask2ErrCode, types.Success)
|
||||
assert.Equal(t, chunkTask2ErrMsg, "")
|
||||
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul")
|
||||
|
||||
batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version)
|
||||
proverBatchTask2 := batchProver2.getProverTask(t, message.ProofTypeBatch)
|
||||
proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul")
|
||||
assert.NotNil(t, proverBatchTask2)
|
||||
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success)
|
||||
assert.Equal(t, batchTask2ErrCode, types.Success)
|
||||
assert.Equal(t, batchTask2ErrMsg, "")
|
||||
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul")
|
||||
|
||||
// verify proof status, it should be verified now, because second prover sent valid proof
|
||||
chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
|
||||
|
||||
@@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
|
||||
}
|
||||
|
||||
// connectToCoordinator sets up a websocket client to connect to the prover manager.
|
||||
func (r *mockProver) connectToCoordinator(t *testing.T) string {
|
||||
func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string {
|
||||
challengeString := r.challenge(t)
|
||||
return r.login(t, challengeString)
|
||||
return r.login(t, challengeString, forkName)
|
||||
}
|
||||
|
||||
func (r *mockProver) challenge(t *testing.T) string {
|
||||
@@ -76,18 +76,32 @@ func (r *mockProver) challenge(t *testing.T) string {
|
||||
return loginData.Token
|
||||
}
|
||||
|
||||
func (r *mockProver) login(t *testing.T, challengeString string) string {
|
||||
authMsg := message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Challenge: challengeString,
|
||||
ProverName: r.proverName,
|
||||
ProverVersion: r.proverVersion,
|
||||
},
|
||||
func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string {
|
||||
var body string
|
||||
if forkName != "" {
|
||||
authMsg := message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Challenge: challengeString,
|
||||
ProverName: r.proverName,
|
||||
ProverVersion: r.proverVersion,
|
||||
HardForkName: forkName,
|
||||
},
|
||||
}
|
||||
assert.NoError(t, authMsg.SignWithKey(r.privKey))
|
||||
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
|
||||
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
|
||||
} else {
|
||||
authMsg := message.LegacyAuthMsg{
|
||||
Identity: &message.LegacyIdentity{
|
||||
Challenge: challengeString,
|
||||
ProverName: r.proverName,
|
||||
ProverVersion: r.proverVersion,
|
||||
},
|
||||
}
|
||||
assert.NoError(t, authMsg.SignWithKey(r.privKey))
|
||||
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
|
||||
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
|
||||
}
|
||||
assert.NoError(t, authMsg.SignWithKey(r.privKey))
|
||||
|
||||
body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
|
||||
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
|
||||
|
||||
var result ctypes.Response
|
||||
client := resty.New()
|
||||
@@ -135,9 +149,9 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) *types.GetTaskSchema {
|
||||
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) {
|
||||
// get task from coordinator
|
||||
token := r.connectToCoordinator(t)
|
||||
token := r.connectToCoordinator(t, forkName)
|
||||
assert.NotEmpty(t, token)
|
||||
|
||||
type response struct {
|
||||
@@ -156,18 +170,15 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) *t
|
||||
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, http.StatusOK, resp.StatusCode())
|
||||
assert.Equal(t, ctypes.Success, result.ErrCode)
|
||||
|
||||
assert.NotEmpty(t, result.Data.TaskID)
|
||||
assert.NotEmpty(t, result.Data.TaskType)
|
||||
assert.NotEmpty(t, result.Data.TaskData)
|
||||
return &result.Data
|
||||
return &result.Data, result.ErrCode, result.ErrMsg
|
||||
}
|
||||
|
||||
// Testing expected errors returned by coordinator.
|
||||
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
|
||||
//
|
||||
//nolint:unparam
|
||||
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) {
|
||||
// get task from coordinator
|
||||
token := r.connectToCoordinator(t)
|
||||
token := r.connectToCoordinator(t, forkName)
|
||||
assert.NotEmpty(t, token)
|
||||
|
||||
type response struct {
|
||||
@@ -190,7 +201,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
|
||||
return result.ErrCode, result.ErrMsg
|
||||
}
|
||||
|
||||
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
|
||||
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) {
|
||||
proofMsgStatus := message.StatusOk
|
||||
if proofStatus == generatedFailed {
|
||||
proofMsgStatus = message.StatusProofError
|
||||
@@ -233,7 +244,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
|
||||
submitProof.Proof = string(encodeData)
|
||||
}
|
||||
|
||||
token := r.connectToCoordinator(t)
|
||||
token := r.connectToCoordinator(t, forkName)
|
||||
assert.NotEmpty(t, token)
|
||||
|
||||
submitProofData, err := json.Marshal(submitProof)
|
||||
|
||||
@@ -6,7 +6,7 @@ require (
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/pressly/goose/v3 v3.16.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
)
|
||||
@@ -20,6 +20,7 @@ require (
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/jackc/pgx/v5 v5.5.4 // indirect
|
||||
github.com/klauspost/compress v1.17.4 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.16 // indirect
|
||||
@@ -31,7 +32,6 @@ require (
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.24.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
golang.org/x/crypto v0.17.0 // indirect
|
||||
golang.org/x/mod v0.16.0 // indirect
|
||||
golang.org/x/sync v0.6.0 // indirect
|
||||
golang.org/x/sys v0.17.0 // indirect
|
||||
|
||||
@@ -58,8 +58,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
|
||||
github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
|
||||
github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
|
||||
github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
|
||||
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
|
||||
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
|
||||
@@ -119,8 +119,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935 h1:bHBt6sillaT4o/9RjxkVX8pWwvEmu37uWBw4XbCjfzY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
|
||||
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
|
||||
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
|
||||
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.