Compare commits

...

2 Commits

Author SHA1 Message Date
HAOYUatHZ
de1d9b98ec bump version (#666) 2023-07-22 15:37:13 +08:00
georgehao
58e07a7481 refactor(coordinator): update coordinator orm and layout (#521)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2023-07-22 09:24:58 +08:00
54 changed files with 15731 additions and 1936 deletions

View File

@@ -7,7 +7,7 @@ require (
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
github.com/jmoiron/sqlx v1.3.5
github.com/kataras/iris/v12 v12.2.0
github.com/lib/pq v1.10.7
github.com/lib/pq v1.10.9
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.19
github.com/modern-go/reflect2 v1.0.2
@@ -44,7 +44,7 @@ require (
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v20.10.21+incompatible // indirect
github.com/docker/docker v23.0.6+incompatible // indirect
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 // indirect
github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
github.com/fatih/structs v1.1.0 // indirect
@@ -54,7 +54,7 @@ require (
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gobwas/httphead v0.1.0 // indirect
github.com/gobwas/pool v0.2.1 // indirect
@@ -91,7 +91,7 @@ require (
github.com/mailgun/raymond/v2 v2.0.48 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mediocregopher/radix/v3 v3.8.1 // indirect
github.com/microcosm-cc/bluemonday v1.0.23 // indirect

View File

@@ -97,8 +97,8 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU=
github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
@@ -144,8 +144,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@@ -303,8 +303,8 @@ github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awS
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw=
github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
@@ -333,8 +333,8 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=

View File

@@ -75,6 +75,10 @@ linters-settings:
# report about shadowed variables
check-shadowing: true
gosec:
disable:
- G108
golint:
# minimal confidence for issues, default is 0.8
min-confidence: 0.8
@@ -227,7 +231,12 @@ issues:
- lll
source: "^//go:generate "
text: "long-lines"
# Exclude gosec issues for G108: Profiling endpoint is automatically exposed
- linters:
- gosec
text: "G108"
- linters:
- wsl
text: "return statements should not be cuddled if block has more than two lines"

View File

@@ -10,6 +10,8 @@ import (
"gorm.io/gorm"
"gorm.io/gorm/logger"
"gorm.io/gorm/utils"
cutils "scroll-tech/common/utils"
)
type gormLogger struct {
@@ -49,6 +51,14 @@ func InitDB(config *Config) (*gorm.DB, error) {
db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
Logger: &tmpGormLogger,
NowFunc: func() time.Time {
// Why force UTC here: without this, gorm generates timestamps in the local timezone (e.g. 2023-07-18 18:24:00 CST+8),
// but Postgres stores them as 2023-07-18 18:24:00 UTC+0, so the stored timezone is wrong.
// With a MySQL DSN such as user:pass@tcp(127.0.0.1:3306)/dbname?charset=utf8mb4&parseTime=True&loc=Local the timezone
// can be pinned via loc=Local, but a Postgres DSN has no loc option, so the timezone is fixed through this gorm option instead.
return cutils.NowUTC()
},
})
if err != nil {
return nil, err
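For readers unfamiliar with gorm's NowFunc hook: the change above pins the timestamps gorm generates (created_at, updated_at, deleted_at) to UTC. A minimal sketch of the same idea outside this codebase; the helper name and DSN handling are placeholders, and the PR itself routes through cutils.NowUTC:

package database

import (
	"time"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

// openUTC is a hypothetical helper: NowFunc overrides the clock gorm uses for
// generated timestamps, so rows are written with UTC times regardless of the
// server's local timezone.
func openUTC(dsn string) (*gorm.DB, error) {
	return gorm.Open(postgres.Open(dsn), &gorm.Config{
		NowFunc: func() time.Time { return time.Now().UTC() },
	})
}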

View File

@@ -7,6 +7,7 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
@@ -65,8 +66,12 @@ func (i *ImgDB) Stop() error {
if i.id == "" {
i.id = GetContainerID(i.name)
}
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, i.id, &timeout); err != nil {
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
return err
}
// remove the stopped container.
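The two ContainerStop changes in this PR follow the Docker client API: from v23 on, ContainerStop takes a container.StopOptions value whose Timeout is a *int in whole seconds, replacing the earlier *time.Duration parameter. A minimal sketch of the new call, assuming an already-created client and a known container ID:

package docker

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// stopWithTimeout is a hypothetical helper mirroring the change above.
func stopWithTimeout(ctx context.Context, cli *client.Client, id string) error {
	timeoutSec := 3 // seconds; a nil Timeout falls back to the daemon's default
	return cli.ContainerStop(ctx, id, container.StopOptions{Timeout: &timeoutSec})
}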

View File

@@ -9,6 +9,7 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/scroll-tech/go-ethereum/ethclient"
"scroll-tech/common/cmd"
@@ -135,8 +136,11 @@ func (i *ImgGeth) Stop() error {
// check if container is running, stop the running container.
id := GetContainerID(i.name)
if id != "" {
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, id, timeout); err != nil {
return err
}
i.id = id

View File

@@ -3,9 +3,9 @@ module scroll-tech/common
go 1.19
require (
github.com/docker/docker v20.10.21+incompatible
github.com/docker/docker v23.0.6+incompatible
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/lib/pq v1.10.9
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.19
github.com/modern-go/reflect2 v1.0.2
@@ -18,7 +18,8 @@ require (
)
require (
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -36,7 +37,7 @@ require (
github.com/go-kit/kit v0.9.0 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
@@ -55,23 +56,23 @@ require (
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jackc/pgx/v5 v5.3.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/moby/term v0.5.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.27.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pkg/errors v0.9.1 // indirect
@@ -84,7 +85,6 @@ require (
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.5.3 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect

View File

@@ -18,13 +18,13 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
@@ -63,7 +63,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -80,8 +79,8 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMa
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU=
github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -121,8 +120,8 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
@@ -216,8 +215,9 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/pgx/v5 v5.3.1 h1:Fcr8QJ1ZeLi5zsPZqQeUZhNhxfkkKBOgJuYkJHoBOtU=
github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
@@ -263,8 +263,8 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
@@ -287,8 +287,8 @@ github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWV
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -296,8 +296,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI=
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
@@ -325,8 +325,8 @@ github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
@@ -384,8 +384,6 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -563,7 +561,6 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -606,7 +603,6 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -699,7 +695,6 @@ gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@@ -95,18 +95,24 @@ func (s RollerProveStatus) String() string {
}
}
// RollerFailureType is the type of a roller session's failure
type RollerFailureType int
// ProverTaskFailureType is the type of a prover task's failure
type ProverTaskFailureType int
const (
// RollerFailureTypeUndefined indicates an unknown roller failure type
RollerFailureTypeUndefined RollerFailureType = iota
// ProverTaskFailureTypeUndefined indicates an unknown prover task failure type
ProverTaskFailureTypeUndefined ProverTaskFailureType = iota
// ProverTaskFailureTypeTimeout indicates that the prover task timed out
ProverTaskFailureTypeTimeout
)
func (s RollerFailureType) String() string {
switch s {
func (r ProverTaskFailureType) String() string {
switch r {
case ProverTaskFailureTypeUndefined:
return "prover task failure undefined"
case ProverTaskFailureTypeTimeout:
return "prover task failure timeout"
default:
return fmt.Sprintf("Undefined (%d)", int32(s))
return "illegal prover task failure type"
}
}
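A quick illustration of the renamed type's Stringer, assuming ProverTaskFailureType lives in scroll-tech/common/types alongside the surrounding declarations:

package main

import (
	"fmt"

	"scroll-tech/common/types"
)

func main() {
	// Prints "prover task failure timeout" via the String method above.
	fmt.Println(types.ProverTaskFailureTypeTimeout)
}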

common/utils/timezone.go (new file, 9 lines added)
View File

@@ -0,0 +1,9 @@
package utils
import "time"
// NowUTC returns the current time in UTC
func NowUTC() time.Time {
utc, _ := time.LoadLocation("")
return time.Now().In(utc)
}
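Note that time.LoadLocation("") resolves to UTC, so NowUTC is equivalent to time.Now().UTC(). A minimal, hypothetical test of that property (not part of this PR):

package utils

import (
	"testing"
	"time"
)

// TestNowUTC checks that NowUTC always reports the UTC location.
func TestNowUTC(t *testing.T) {
	if NowUTC().Location() != time.UTC {
		t.Fatalf("expected UTC, got %s", NowUTC().Location())
	}
}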

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.0.25"
var tag = "v4.0.26"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -19,8 +19,8 @@ test:
libzkp:
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
rm -rf ./verifier/lib && cp -r ../common/libzkp/interface ./verifier/lib
find ../common | grep libzktrie.so | xargs -I{} cp {} ./verifier/lib/
rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
find ../common | grep libzktrie.so | xargs -I{} cp {} ./internal/logic/verifier/lib
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
@@ -29,13 +29,13 @@ mock_coordinator: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd
test-verifier: libzkp
go test -tags ffi -timeout 0 -v ./verifier
go test -tags ffi -timeout 0 -v ./internal/logic/verifier
test-gpu-verifier: libzkp
go test -tags="gpu ffi" -timeout 0 -v ./verifier
go test -tags="gpu ffi" -timeout 0 -v ./internal/logic/verifier
lint: ## Lint the files - used for CI
cp -r ../common/libzkp/interface ./verifier/lib
cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
GOBIN=$(PWD)/build/bin go run ../build/lint.go
clean: ## Empty out the bin folder
@@ -45,4 +45,4 @@ docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/coordinator.Dockerfile
docker_push:
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}

View File

@@ -1,129 +0,0 @@
package coordinator
import (
"context"
"errors"
"fmt"
"github.com/patrickmn/go-cache"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/metrics"
"scroll-tech/common/types/message"
)
var (
coordinatorRollersDisconnectsTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/rollers/disconnects/total", metrics.ScrollRegistry)
)
// RollerAPI for rollers in order to register and submit proofs
type RollerAPI interface {
RequestToken(authMsg *message.AuthMsg) (string, error)
Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error)
SubmitProof(proof *message.ProofMsg) error
}
// RequestToken generates and sends back register token for roller
func (m *Manager) RequestToken(authMsg *message.AuthMsg) (string, error) {
if ok, err := authMsg.Verify(); !ok {
if err != nil {
log.Error("failed to verify auth message", "error", err)
}
return "", errors.New("signature verification failed")
}
pubkey, _ := authMsg.PublicKey()
if token, ok := m.tokenCache.Get(pubkey); ok {
return token.(string), nil
}
token, err := message.GenerateToken()
if err != nil {
return "", errors.New("token generation failed")
}
m.tokenCache.Set(pubkey, token, cache.DefaultExpiration)
return token, nil
}
// Register register api for roller
func (m *Manager) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
// Verify register message.
if ok, err := authMsg.Verify(); !ok {
if err != nil {
log.Error("failed to verify auth message", "error", err)
}
return nil, errors.New("signature verification failed")
}
pubkey, _ := authMsg.PublicKey()
// Lock here to avoid malicious roller message replay before cleanup of token
m.registerMu.Lock()
if ok, err := m.VerifyToken(authMsg); !ok {
m.registerMu.Unlock()
return nil, err
}
// roller successfully registered, remove token associated with this roller
m.tokenCache.Delete(pubkey)
m.registerMu.Unlock()
// create or get the roller message channel
taskCh, err := m.register(pubkey, authMsg.Identity)
if err != nil {
return nil, err
}
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
}
rpcSub := notifier.CreateSubscription()
go func() {
defer func() {
m.freeRoller(pubkey)
log.Info("roller unregister", "name", authMsg.Identity.Name, "pubkey", pubkey)
}()
for {
select {
case task := <-taskCh:
notifier.Notify(rpcSub.ID, task) //nolint
case err := <-rpcSub.Err():
coordinatorRollersDisconnectsTotalCounter.Inc(1)
log.Warn("client stopped the ws connection", "name", authMsg.Identity.Name, "pubkey", pubkey, "err", err)
return
case <-notifier.Closed():
return
}
}
}()
log.Info("roller register", "name", authMsg.Identity.Name, "pubkey", pubkey, "version", authMsg.Identity.Version)
return rpcSub, nil
}
// SubmitProof roller pull proof
func (m *Manager) SubmitProof(proof *message.ProofMsg) error {
// Verify the signature
if ok, err := proof.Verify(); !ok {
if err != nil {
log.Error("failed to verify proof message", "error", err)
}
return errors.New("auth signature verify fail")
}
pubkey, _ := proof.PublicKey()
// Only allow registered pub-key.
if !m.existTaskIDForRoller(pubkey, proof.ID) {
return fmt.Errorf("the roller or session id doesn't exist, pubkey: %s, ID: %s", pubkey, proof.ID)
}
m.updateMetricRollerProofsLastFinishedTimestampGauge(pubkey)
err := m.handleZkProof(pubkey, proof.ProofDetail)
if err != nil {
return err
}
defer m.freeTaskIDForRoller(pubkey, proof.ID)
return nil
}

View File

@@ -1,98 +0,0 @@
package coordinator
import (
"fmt"
"time"
"scroll-tech/common/types"
)
// RollerDebugAPI roller api interface in order go get debug message.
type RollerDebugAPI interface {
// ListRollers returns all live rollers
ListRollers() ([]*RollerInfo, error)
// GetSessionInfo returns the session information given the session id.
GetSessionInfo(sessionID string) (*SessionInfo, error)
}
// RollerInfo records the roller name, pub key and active session info (id, start time).
type RollerInfo struct {
Name string `json:"name"`
Version string `json:"version"`
PublicKey string `json:"public_key"`
ActiveSession string `json:"active_session,omitempty"`
ActiveSessionStartTime time.Time `json:"active_session_start_time"` // latest proof start time.
}
// SessionInfo records proof create or proof verify failed session.
type SessionInfo struct {
ID string `json:"id"`
Status string `json:"status"`
StartTime time.Time `json:"start_time"`
FinishTime time.Time `json:"finish_time,omitempty"` // set to 0 if not finished
AssignedRollers []string `json:"assigned_rollers,omitempty"` // roller name list
Error string `json:"error,omitempty"` // empty string if no error encountered
}
// ListRollers returns all live rollers.
func (m *Manager) ListRollers() ([]*RollerInfo, error) {
m.mu.RLock()
defer m.mu.RUnlock()
var res []*RollerInfo
for _, pk := range m.rollerPool.Keys() {
node, exist := m.rollerPool.Get(pk)
if !exist {
continue
}
roller := node.(*rollerNode)
info := &RollerInfo{
Name: roller.Name,
Version: roller.Version,
PublicKey: pk,
}
for id, sess := range m.sessions {
for _, proverTask := range sess.proverTasks {
if proverTask.ProverPublicKey == pk {
info.ActiveSessionStartTime = proverTask.CreatedAt
info.ActiveSession = id
break
}
}
}
res = append(res, info)
}
return res, nil
}
func newSessionInfo(sess *session, status types.ProvingStatus, errMsg string, finished bool) *SessionInfo {
now := time.Now()
var nameList []string
for _, proverTask := range sess.proverTasks {
nameList = append(nameList, proverTask.ProverName)
}
info := SessionInfo{
ID: sess.taskID,
Status: status.String(),
AssignedRollers: nameList,
StartTime: sess.proverTasks[0].CreatedAt,
Error: errMsg,
}
if finished {
info.FinishTime = now
}
return &info
}
// GetSessionInfo returns the session information given the session id.
func (m *Manager) GetSessionInfo(sessionID string) (*SessionInfo, error) {
m.mu.RLock()
defer m.mu.RUnlock()
if info, ok := m.failedSessionInfos[sessionID]; ok {
return info, nil
}
if s, ok := m.sessions[sessionID]; ok {
return newSessionInfo(s, types.ProvingTaskAssigned, "", false), nil
}
return nil, fmt.Errorf("no such session, sessionID: %s", sessionID)
}

View File

@@ -1,218 +0,0 @@
package coordinator
import (
"context"
"errors"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
)
func geneAuthMsg(t *testing.T) *message.AuthMsg {
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test1",
},
}
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
assert.NoError(t, authMsg.SignWithKey(privKey))
return authMsg
}
var rollerManager *Manager
func init() {
rmConfig := config.RollerManagerConfig{}
rmConfig.Verifier = &config.VerifierConfig{MockMode: true}
rollerManager, _ = New(context.Background(), &rmConfig, nil)
}
func TestManager_RequestToken(t *testing.T) {
convey.Convey("auth msg verify failure", t, func() {
tmpAuthMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test_request_token",
},
}
token, err := rollerManager.RequestToken(tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, token)
})
convey.Convey("token has already been distributed", t, func() {
tmpAuthMsg := geneAuthMsg(t)
key, _ := tmpAuthMsg.PublicKey()
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
rollerManager.tokenCache.Set(key, tokenCacheStored, time.Hour)
token, err := rollerManager.RequestToken(tmpAuthMsg)
assert.NoError(t, err)
assert.Equal(t, token, tokenCacheStored)
})
convey.Convey("token generation failure", t, func() {
tmpAuthMsg := geneAuthMsg(t)
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
return "", errors.New("token generation failed")
})
defer patchGuard.Reset()
token, err := rollerManager.RequestToken(tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, token)
})
convey.Convey("token generation success", t, func() {
tmpAuthMsg := geneAuthMsg(t)
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
return tokenCacheStored, nil
})
defer patchGuard.Reset()
token, err := rollerManager.RequestToken(tmpAuthMsg)
assert.NoError(t, err)
assert.Equal(t, tokenCacheStored, token)
})
}
func TestManager_Register(t *testing.T) {
convey.Convey("auth msg verify failure", t, func() {
tmpAuthMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test_register",
},
}
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("verify token failure", t, func() {
tmpAuthMsg := geneAuthMsg(t)
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return false, errors.New("verify token failure")
})
defer patchGuard.Reset()
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("register failure", t, func() {
tmpAuthMsg := geneAuthMsg(t)
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
patchGuard.ApplyPrivateMethod(rollerManager, "register", func(*Manager, string, *message.Identity) (<-chan *message.TaskMsg, error) {
return nil, errors.New("register error")
})
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("notifier failure", t, func() {
tmpAuthMsg := geneAuthMsg(t)
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
patchGuard.ApplyFunc(rpc.NotifierFromContext, func(ctx context.Context) (*rpc.Notifier, bool) {
return nil, false
})
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Equal(t, err, rpc.ErrNotificationsUnsupported)
assert.Equal(t, *subscription, rpc.Subscription{})
})
}
func TestManager_SubmitProof(t *testing.T) {
id := "10000"
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: id,
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
var rp rollerNode
rp.TaskIDs = cmap.New()
rp.TaskIDs.Set(id, id)
convey.Convey("verify failure", t, func() {
var s *message.ProofMsg
patchGuard := gomonkey.ApplyMethodFunc(s, "Verify", func() (bool, error) {
return false, errors.New("proof verify error")
})
defer patchGuard.Reset()
err := rollerManager.SubmitProof(proof)
assert.Error(t, err)
})
convey.Convey("existTaskIDForRoller failure", t, func() {
var s *cmap.ConcurrentMap
patchGuard := gomonkey.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
return nil, true
})
defer patchGuard.Reset()
var pm *message.ProofMsg
patchGuard.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
return true, nil
})
err := rollerManager.SubmitProof(proof)
assert.Error(t, err)
})
convey.Convey("handleZkProof failure", t, func() {
var pm *message.ProofMsg
patchGuard := gomonkey.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
return true, nil
})
defer patchGuard.Reset()
var s cmap.ConcurrentMap
patchGuard.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
return &rp, true
})
patchGuard.ApplyPrivateMethod(rollerManager, "handleZkProof", func(manager *Manager, pk string, msg *message.ProofDetail) error {
return errors.New("handle zk proof error")
})
err := rollerManager.SubmitProof(proof)
assert.Error(t, err)
})
convey.Convey("SubmitProof success", t, func() {
var pm *message.ProofMsg
patchGuard := gomonkey.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
return true, nil
})
defer patchGuard.Reset()
var s cmap.ConcurrentMap
patchGuard.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
return &rp, true
})
patchGuard.ApplyPrivateMethod(rollerManager, "handleZkProof", func(manager *Manager, pk string, msg *message.ProofDetail) error {
return nil
})
err := rollerManager.SubmitProof(proof)
assert.NoError(t, err)
})
}

View File

@@ -6,6 +6,9 @@ import (
"os"
"os/signal"
// enable the pprof
_ "net/http/pprof"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -14,8 +17,10 @@ import (
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/logic/rollermanager"
)
var app *cli.App
@@ -37,44 +42,34 @@ func init() {
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// init db handler
subCtx, cancel := context.WithCancel(ctx.Context)
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
proofCollector := cron.NewCollector(subCtx, db, cfg)
rollermanager.InitRollerManager(db)
defer func() {
proofCollector.Stop()
cancel()
if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
log.Error("can not close db connection", "error", err)
}
}()
subCtx, cancel := context.WithCancel(ctx.Context)
// Initialize all coordinator modules.
rollerManager, err := coordinator.New(subCtx, cfg.RollerManagerConfig, db)
defer func() {
cancel()
rollerManager.Stop()
}()
if err != nil {
return err
}
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Start all modules.
if err = rollerManager.Start(); err != nil {
log.Crit("couldn't start roller manager", "error", err)
}
apis := rollerManager.APIs()
apis := api.RegisterAPIs(cfg, db)
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(fmt.Sprintf("%s:%d", ctx.String(httpListenAddrFlag.Name), ctx.Int(httpPortFlag.Name)), apis)
@@ -89,8 +84,7 @@ func action(ctx *cli.Context) error {
}
// Register api and start ws service.
if ctx.Bool(wsEnabledFlag.Name) {
handler, addr, err := utils.StartWSEndpoint(fmt.Sprintf("%s:%d", ctx.String(wsListenAddrFlag.Name), ctx.Int(wsPortFlag.Name)),
apis, cfg.RollerManagerConfig.CompressionLevel)
handler, addr, err := utils.StartWSEndpoint(fmt.Sprintf("%s:%d", ctx.String(wsListenAddrFlag.Name), ctx.Int(wsPortFlag.Name)), apis, cfg.RollerManagerConfig.CompressionLevel)
if err != nil {
log.Crit("Could not start WS api", "error", err)
}

View File

@@ -10,7 +10,6 @@ require (
github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
golang.org/x/exp v0.0.0-20230206171751-46f607a40771
golang.org/x/sync v0.3.0
gorm.io/gorm v1.25.2
)

View File

@@ -109,8 +109,6 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

View File

@@ -0,0 +1,115 @@
package api
import (
"context"
"errors"
"fmt"
"time"
"github.com/patrickmn/go-cache"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/proof"
)
// RollerController is the roller API controller
type RollerController struct {
tokenCache *cache.Cache
proofReceiver *proof.ZKProofReceiver
taskWorker *proof.TaskWorker
}
// NewRollerController creates a roller controller
func NewRollerController(cfg *config.RollerManagerConfig, db *gorm.DB) *RollerController {
return &RollerController{
proofReceiver: proof.NewZKProofReceiver(cfg, db),
taskWorker: proof.NewTaskWorker(),
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
}
}
// RequestToken returns a registration token for the roller identified by authMsg
func (r *RollerController) RequestToken(authMsg *message.AuthMsg) (string, error) {
if ok, err := authMsg.Verify(); !ok {
if err != nil {
log.Error("failed to verify auth message", "error", err)
}
return "", errors.New("signature verification failed")
}
pubkey, err := authMsg.PublicKey()
if err != nil {
return "", fmt.Errorf("RequestToken auth msg public key error:%w", err)
}
if token, ok := r.tokenCache.Get(pubkey); ok {
return token.(string), nil
}
token, err := message.GenerateToken()
if err != nil {
return "", errors.New("token generation failed")
}
r.tokenCache.SetDefault(pubkey, token)
return token, nil
}
// verifyToken checks that the cached token for the roller's public key matches authMsg and has not expired
func (r *RollerController) verifyToken(authMsg *message.AuthMsg) (bool, error) {
pubkey, err := authMsg.PublicKey()
if err != nil {
return false, fmt.Errorf("verify token auth msg public key error:%w", err)
}
// Get returns a miss if the cached token has expired
if token, ok := r.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
return false, fmt.Errorf("failed to find corresponding token. roller name: %s roller pk: %s", authMsg.Identity.Name, pubkey)
}
return true, nil
}
// Register is the registration API for rollers
func (r *RollerController) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
// Verify register message.
if ok, err := authMsg.Verify(); !ok {
if err != nil {
log.Error("failed to verify auth message", "error", err)
}
return nil, errors.New("signature verification failed")
}
// Verify the token to avoid malicious roller message replay before the token is cleaned up
if ok, err := r.verifyToken(authMsg); !ok {
return nil, err
}
pubkey, err := authMsg.PublicKey()
if err != nil {
return nil, fmt.Errorf("register auth msg public key error:%w", err)
}
// roller successfully registered, remove token associated with this roller
r.tokenCache.Delete(pubkey)
rpcSub, err := r.taskWorker.AllocTaskWorker(ctx, authMsg)
if err != nil {
return rpcSub, err
}
return rpcSub, nil
}
// SubmitProof handles a proof submitted by a roller
func (r *RollerController) SubmitProof(proof *message.ProofMsg) error {
// Verify the signature
if ok, err := proof.Verify(); !ok {
if err != nil {
log.Error("failed to verify proof message", "error", err)
}
return errors.New("auth signature verify fail")
}
err := r.proofReceiver.HandleZkProof(context.Background(), proof)
if err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,304 @@
package api
import (
"context"
"crypto/ecdsa"
"database/sql"
"errors"
"fmt"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/proof"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
func geneAuthMsg(t *testing.T) (*message.AuthMsg, *ecdsa.PrivateKey) {
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test1",
},
}
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
assert.NoError(t, authMsg.SignWithKey(privKey))
return authMsg, privKey
}
var rollerController *RollerController
func init() {
conf := &config.RollerManagerConfig{
TokenTimeToLive: 120,
}
conf.Verifier = &config.VerifierConfig{MockMode: true}
rollerController = NewRollerController(conf, nil)
}
func TestRoller_RequestToken(t *testing.T) {
convey.Convey("auth msg verify failure", t, func() {
tmpAuthMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test_request_token",
},
}
token, err := rollerController.RequestToken(tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, token)
})
convey.Convey("token has already been distributed", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
key, err := tmpAuthMsg.PublicKey()
assert.NoError(t, err)
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
rollerController.tokenCache.Set(key, tokenCacheStored, time.Hour)
token, err := rollerController.RequestToken(tmpAuthMsg)
assert.NoError(t, err)
assert.Equal(t, token, tokenCacheStored)
})
convey.Convey("token generation failure", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
return "", errors.New("token generation failed")
})
defer patchGuard.Reset()
token, err := rollerController.RequestToken(tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, token)
})
convey.Convey("token generation success", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
return tokenCacheStored, nil
})
defer patchGuard.Reset()
token, err := rollerController.RequestToken(tmpAuthMsg)
assert.NoError(t, err)
assert.Equal(t, tokenCacheStored, token)
})
}
func TestRoller_Register(t *testing.T) {
convey.Convey("auth msg verify failure", t, func() {
tmpAuthMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test_register",
},
}
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("verify token failure", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return false, errors.New("verify token failure")
})
defer patchGuard.Reset()
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("notifier failure", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
patchGuard.ApplyFunc(rpc.NotifierFromContext, func(ctx context.Context) (*rpc.Notifier, bool) {
return nil, false
})
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Equal(t, err, rpc.ErrNotificationsUnsupported)
assert.Equal(t, *subscription, rpc.Subscription{})
})
convey.Convey("register failure", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
var taskWorker *proof.TaskWorker
patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
return nil, errors.New("register error")
})
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("register success", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
var taskWorker *proof.TaskWorker
patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
return nil, nil
})
_, err := rollerController.Register(context.Background(), tmpAuthMsg)
assert.NoError(t, err)
})
}
func TestRoller_SubmitProof(t *testing.T) {
tmpAuthMsg, prvKey := geneAuthMsg(t)
pubKey, err := tmpAuthMsg.PublicKey()
assert.NoError(t, err)
id := "rollers_info_test"
tmpProof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
Type: message.ProofTypeChunk,
ID: id,
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
assert.NoError(t, tmpProof.Sign(prvKey))
proofPubKey, err := tmpProof.PublicKey()
assert.NoError(t, err)
assert.Equal(t, pubKey, proofPubKey)
var proverTaskOrm *orm.ProverTask
patchGuard := gomonkey.ApplyMethodFunc(proverTaskOrm, "GetProverTasks", func(ctx context.Context, fields map[string]interface{}, orderByList []string, offset, limit int) ([]orm.ProverTask, error) {
return nil, nil
})
defer patchGuard.Reset()
rollermanager.InitRollerManager(nil)
taskChan, err := rollermanager.Manager.Register(context.Background(), pubKey, tmpAuthMsg.Identity)
assert.NotNil(t, taskChan)
assert.NoError(t, err)
convey.Convey("verify failure", t, func() {
var s *message.ProofMsg
patchGuard.ApplyMethodFunc(s, "Verify", func() (bool, error) {
return false, errors.New("proof verify error")
})
err = rollerController.SubmitProof(tmpProof)
assert.Error(t, err)
})
var s *message.ProofMsg
patchGuard.ApplyMethodFunc(s, "Verify", func() (bool, error) {
return true, nil
})
var chunkOrm *orm.Chunk
patchGuard.ApplyMethodFunc(chunkOrm, "UpdateProofByHash", func(context.Context, string, *message.AggProof, uint64, ...*gorm.DB) error {
return nil
})
patchGuard.ApplyMethodFunc(chunkOrm, "UpdateProvingStatus", func(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
return nil
})
var batchOrm *orm.Batch
patchGuard.ApplyMethodFunc(batchOrm, "UpdateProofByHash", func(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
return nil
})
patchGuard.ApplyMethodFunc(batchOrm, "UpdateProvingStatus", func(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
return nil
})
convey.Convey("get none rollers of prover task", t, func() {
patchGuard.ApplyMethodFunc(proverTaskOrm, "GetProverTaskByTaskIDAndPubKey", func(ctx context.Context, hash, pubKey string) (*orm.ProverTask, error) {
return nil, nil
})
tmpProof1 := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: "10001",
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
tmpProof1.Sign(privKey)
_, err1 := tmpProof1.PublicKey()
assert.NoError(t, err1)
err2 := rollerController.SubmitProof(tmpProof1)
fmt.Println(err2)
targetErr := fmt.Errorf("validator failure get none prover task for the proof")
assert.Equal(t, err2.Error(), targetErr.Error())
})
patchGuard.ApplyMethodFunc(proverTaskOrm, "GetProverTaskByTaskIDAndPubKey", func(ctx context.Context, hash, pubKey string) (*orm.ProverTask, error) {
now := time.Now()
s := &orm.ProverTask{
TaskID: id,
ProverPublicKey: proofPubKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: "rollers_info_test",
ProvingStatus: int16(types.RollerAssigned),
CreatedAt: now,
}
return s, nil
})
patchGuard.ApplyMethodFunc(proverTaskOrm, "UpdateProverTaskProvingStatus", func(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus, dbTX ...*gorm.DB) error {
return nil
})
patchGuard.ApplyPrivateMethod(rollerController.proofReceiver, "proofFailure", func(hash string, pubKey string, proofMsgType message.ProofType) {
})
convey.Convey("proof msg status is not ok", t, func() {
tmpProof.Status = message.StatusProofError
err1 := rollerController.SubmitProof(tmpProof)
assert.NoError(t, err1)
})
tmpProof.Status = message.StatusOk
var db *gorm.DB
patchGuard.ApplyMethodFunc(db, "Transaction", func(fc func(tx *gorm.DB) error, opts ...*sql.TxOptions) (err error) {
return nil
})
var tmpVerifier *verifier.Verifier
convey.Convey("verifier proof failure", t, func() {
targetErr := errors.New("verify proof failure")
patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyProof", func(proof *message.AggProof) (bool, error) {
return false, targetErr
})
err1 := rollerController.SubmitProof(tmpProof)
assert.Nil(t, err1)
})
patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyProof", func(proof *message.AggProof) (bool, error) {
return true, nil
})
patchGuard.ApplyPrivateMethod(rollerController.proofReceiver, "closeProofTask", func(hash string, pubKey string, proofMsg *message.ProofMsg, rollersInfo *coordinatorType.RollersInfo) error {
return nil
})
err1 := rollerController.SubmitProof(tmpProof)
assert.Nil(t, err1)
}
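The test above relies on gomonkey patch guards to stub out ORM and verifier methods. A minimal, self-contained sketch of that pattern, using a hypothetical fooStub type (not part of this change) and assuming the gomonkey v2 module:
package api

import (
	"testing"

	"github.com/agiledragon/gomonkey/v2"
	"github.com/stretchr/testify/assert"
)

// fooStub is a hypothetical type used only to illustrate the patching pattern.
type fooStub struct{}

func (f *fooStub) Verify() (bool, error) { return false, nil }

// TestPatchGuardSketch shows the typical guard lifecycle: create, stub, reset.
// Note: gomonkey rewrites machine code, so the test binary must be built with
// inlining disabled, e.g. go test -gcflags=all=-l.
func TestPatchGuardSketch(t *testing.T) {
	patchGuard := gomonkey.NewPatches()
	defer patchGuard.Reset() // restore every patched method when the test ends

	var f *fooStub
	patchGuard.ApplyMethodFunc(f, "Verify", func() (bool, error) {
		return true, nil // stubbed behaviour, mirroring the ProofMsg.Verify patch above
	})

	ok, err := (&fooStub{}).Verify()
	assert.NoError(t, err)
	assert.True(t, ok)
}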

View File

@@ -0,0 +1,30 @@
package api
import (
"context"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
)
// RollerAPI is the interface rollers use to register and submit proofs
type RollerAPI interface {
RequestToken(authMsg *message.AuthMsg) (string, error)
Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error)
SubmitProof(proof *message.ProofMsg) error
}
// RegisterAPIs registers the coordinator APIs
func RegisterAPIs(cfg *config.Config, db *gorm.DB) []rpc.API {
return []rpc.API{
{
Namespace: "roller",
Service: RollerAPI(NewRollerController(cfg.RollerManagerConfig, db)),
Public: true,
},
}
}
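As a rough sketch of how the returned rpc.API list might be wired into a go-ethereum RPC server (the surrounding setup is an assumption, not part of this change):
package api

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/rpc"
	"gorm.io/gorm"

	"scroll-tech/coordinator/internal/config"
)

// startRPCSketch wires the coordinator APIs into a go-ethereum RPC server.
func startRPCSketch(cfg *config.Config, db *gorm.DB) (*rpc.Server, error) {
	srv := rpc.NewServer()
	for _, api := range RegisterAPIs(cfg, db) {
		// the "roller" namespace exposes roller_requestToken, roller_register and roller_submitProof
		if err := srv.RegisterName(api.Namespace, api.Service); err != nil {
			return nil, fmt.Errorf("failed to register namespace %s: %w", api.Namespace, err)
		}
	}
	return srv, nil
}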

View File

@@ -0,0 +1,166 @@
package cron
import (
"context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/collector"
"scroll-tech/coordinator/internal/orm"
)
// Collector periodically collects chunk/batch tasks and sends them to provers
type Collector struct {
cfg *config.Config
db *gorm.DB
ctx context.Context
stopRunChan chan struct{}
stopTimeoutChan chan struct{}
collectors map[message.ProofType]collector.Collector
proverTaskOrm *orm.ProverTask
chunkOrm *orm.Chunk
batchOrm *orm.Batch
}
// NewCollector creates a Collector that periodically collects tasks and sends them to provers
func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config) *Collector {
c := &Collector{
cfg: cfg,
db: db,
ctx: ctx,
stopRunChan: make(chan struct{}),
stopTimeoutChan: make(chan struct{}),
collectors: make(map[message.ProofType]collector.Collector),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
}
c.collectors[message.ProofTypeBatch] = collector.NewBatchProofCollector(cfg, db)
c.collectors[message.ProofTypeChunk] = collector.NewChunkProofCollector(cfg, db)
go c.run()
go c.timeoutProofTask()
log.Info("Start coordinator successfully.")
return c
}
// Stop stops all collector loops
func (c *Collector) Stop() {
c.stopRunChan <- struct{}{}
c.stopTimeoutChan <- struct{}{}
}
// run is the periodic task collection loop
func (c *Collector) run() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("collector panic error:%v", err)
log.Warn(nerr.Error())
}
}()
ticker := time.NewTicker(time.Second * 2)
for {
select {
case <-ticker.C:
for _, tmpCollector := range c.collectors {
if err := tmpCollector.Collect(c.ctx); err != nil {
log.Warn("collect data to prover failure", "collector name", tmpCollector.Name(), "error", err)
}
}
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopRunChan:
log.Info("the coordinator run loop exit")
return
}
}
}
// timeoutProofTask periodically checks whether assigned prover tasks have timed out. If a task
// has timed out, the chunk/batch task is restored to unassigned so the collector can retry it.
func (c *Collector) timeoutProofTask() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("timeout proof task panic error:%v", err)
log.Warn(nerr.Error())
}
}()
ticker := time.NewTicker(time.Second * 2)
for {
select {
case <-ticker.C:
assignedProverTasks, err := c.proverTaskOrm.GetAssignedProverTasks(c.ctx, 10)
if err != nil {
log.Error("get unassigned session info failure", "error", err)
break
}
for _, assignedProverTask := range assignedProverTasks {
timeoutDuration := time.Duration(c.cfg.RollerManagerConfig.CollectionTime) * time.Minute
// Do not mark the chunk/batch proving status as failed here: the collector loop checks the
// attempt count and will set the final proving status once the maximum is reached.
if time.Since(assignedProverTask.AssignedAt) >= timeoutDuration {
log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
err = c.db.Transaction(func(tx *gorm.DB) error {
// update prover task proving status as RollerProofInvalid
if err = c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.RollerProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
// update prover task failure type
if err = c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
// update the task to unassigned, let collector restart it
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
if err = c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
if err = c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
return nil
})
if err != nil {
log.Error("check task proof is timeout failure", "error", err)
}
}
}
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
return
}
}
}
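A minimal lifecycle sketch for the cron collector, assuming the database handle and config come from the coordinator's startup code:
package cron

import (
	"context"

	"gorm.io/gorm"

	"scroll-tech/coordinator/internal/config"
)

// lifecycleSketch shows how the coordinator is expected to start and stop the cron loops.
func lifecycleSketch(db *gorm.DB, cfg *config.Config) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// NewCollector immediately starts the collection loop and the timeout checker.
	proofCollector := NewCollector(ctx, db, cfg)

	// ... serve until a shutdown signal arrives ...

	// Stop unblocks both goroutines; cancelling the context would also end them.
	proofCollector.Stop()
}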

View File

@@ -0,0 +1,111 @@
package collector
import (
"context"
"fmt"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// BatchProofCollector is the collector implementation for batch proofs
type BatchProofCollector struct {
BaseCollector
}
// NewBatchProofCollector creates a new batch proof collector
func NewBatchProofCollector(cfg *config.Config, db *gorm.DB) *BatchProofCollector {
bp := &BatchProofCollector{
BaseCollector: BaseCollector{
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
},
}
return bp
}
// Name returns the batch proof collector name
func (bp *BatchProofCollector) Name() string {
return BatchCollectorName
}
// Collect loads and sends batch tasks
func (bp *BatchProofCollector) Collect(ctx context.Context) error {
batchTasks, err := bp.batchOrm.GetUnassignedBatches(ctx, 1)
if err != nil {
return fmt.Errorf("failed to get unassigned batch proving tasks, error:%w", err)
}
if len(batchTasks) == 0 {
return nil
}
if len(batchTasks) != 1 {
return fmt.Errorf("get unassigned batch proving task len not 1, batch tasks:%v", batchTasks)
}
batchTask := batchTasks[0]
log.Info("start batch proof generation session", "id", batchTask.Hash)
if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch) == 0 {
return fmt.Errorf("no idle common roller when starting proof generation session, id:%s", batchTask.Hash)
}
if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
return fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
}
rollerStatusList, err := bp.sendTask(ctx, batchTask.Hash)
if err != nil {
return fmt.Errorf("send batch task id:%s err:%w", batchTask.Hash, err)
}
transErr := bp.db.Transaction(func(tx *gorm.DB) error {
// Update session proving status as assigned.
if err = bp.batchOrm.UpdateProvingStatus(ctx, batchTask.Hash, types.ProvingTaskAssigned, tx); err != nil {
return fmt.Errorf("failed to update task status, id:%s, error:%w", batchTask.Hash, err)
}
for _, rollerStatus := range rollerStatusList {
proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: rollerStatus.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: rollerStatus.Name,
ProvingStatus: int16(types.RollerAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// UTC time must be used here; see scroll/common/database/db.go for details.
AssignedAt: utils.NowUTC(),
}
// Store session info.
if err = bp.proverTaskOrm.SetProverTask(ctx, &proverTask, tx); err != nil {
return fmt.Errorf("db set session info fail, session id:%s, error:%w", proverTask.TaskID, err)
}
}
return nil
})
return transErr
}
func (bp *BatchProofCollector) sendTask(ctx context.Context, taskID string) ([]*coordinatorType.RollerStatus, error) {
// get chunk proofs from db
chunkProofs, err := bp.chunkOrm.GetProofsByBatchHash(ctx, taskID)
if err != nil {
err = fmt.Errorf("failed to get chunk proofs for batch task id:%s err:%w ", taskID, err)
return nil, err
}
return bp.BaseCollector.sendTask(message.ProofTypeBatch, taskID, nil, chunkProofs)
}

View File

@@ -0,0 +1,117 @@
package collector
import (
"context"
"fmt"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// ChunkProofCollector is the collector implementation for chunk proofs
type ChunkProofCollector struct {
BaseCollector
}
// NewChunkProofCollector creates a new chunk proof collector
func NewChunkProofCollector(cfg *config.Config, db *gorm.DB) *ChunkProofCollector {
cp := &ChunkProofCollector{
BaseCollector: BaseCollector{
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
},
}
return cp
}
// Name returns the chunk proof collector name
func (cp *ChunkProofCollector) Name() string {
return ChunkCollectorName
}
// Collect loads and sends chunk tasks that need to be proven
func (cp *ChunkProofCollector) Collect(ctx context.Context) error {
// load and send chunk tasks
chunkTasks, err := cp.chunkOrm.GetUnassignedChunks(ctx, 1)
if err != nil {
return fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
}
if len(chunkTasks) == 0 {
return nil
}
if len(chunkTasks) != 1 {
return fmt.Errorf("get unassigned chunk proving task len not 1, chunk tasks:%v", chunkTasks)
}
chunkTask := chunkTasks[0]
log.Info("start chunk generation session", "id", chunkTask.Hash)
if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
return fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
}
if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
return fmt.Errorf("no idle chunk roller when starting proof generation session, id:%s", chunkTask.Hash)
}
rollerStatusList, err := cp.sendTask(ctx, chunkTask.Hash)
if err != nil {
return fmt.Errorf("send task failure, id:%s error:%w", chunkTask.Hash, err)
}
transErr := cp.db.Transaction(func(tx *gorm.DB) error {
// Update session proving status as assigned.
if err = cp.chunkOrm.UpdateProvingStatus(ctx, chunkTask.Hash, types.ProvingTaskAssigned, tx); err != nil {
log.Error("failed to update task status", "id", chunkTask.Hash, "err", err)
return err
}
for _, rollerStatus := range rollerStatusList {
proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: rollerStatus.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: rollerStatus.Name,
ProvingStatus: int16(types.RollerAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// UTC time must be used here; see scroll/common/database/db.go for details.
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask, tx); err != nil {
return fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, rollerStatus.PublicKey, err)
}
}
return nil
})
return transErr
}
func (cp *ChunkProofCollector) sendTask(ctx context.Context, hash string) ([]*coordinatorType.RollerStatus, error) {
// Get block hashes.
wrappedBlocks, err := cp.blockOrm.GetL2BlocksByChunkHash(ctx, hash)
if err != nil {
return nil, fmt.Errorf("failed to fetch wrapped blocks, batch hash:%s err:%w", hash, err)
}
blockHashes := make([]common.Hash, len(wrappedBlocks))
for i, wrappedBlock := range wrappedBlocks {
blockHashes[i] = wrappedBlock.Header.Hash()
}
return cp.BaseCollector.sendTask(message.ProofTypeChunk, hash, blockHashes, nil)
}

View File

@@ -0,0 +1,125 @@
package collector
import (
"context"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
const (
// BatchCollectorName the name of batch collector
BatchCollectorName = "batch_collector"
// ChunkCollectorName the name of chunk collector
ChunkCollectorName = "chunk_collector"
)
var coordinatorSessionsTimeoutTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)
// Collector is the interface for collectors that send task data to provers
type Collector interface {
Name() string
Collect(ctx context.Context) error
}
// BaseCollector provides the shared functionality used by the concrete collectors
type BaseCollector struct {
cfg *config.Config
ctx context.Context
db *gorm.DB
batchOrm *orm.Batch
chunkOrm *orm.Chunk
blockOrm *orm.L2Block
proverTaskOrm *orm.ProverTask
}
// checkAttemptsExceeded checks how many prover task attempts exist for the given task. It returns
// false once the maximum number of attempts has been reached and the task has been marked as
// failed, and true otherwise.
func (b *BaseCollector) checkAttemptsExceeded(hash string, taskType message.ProofType) bool {
whereFields := make(map[string]interface{})
whereFields["task_id"] = hash
whereFields["task_type"] = int16(taskType)
proverTasks, err := b.proverTaskOrm.GetProverTasks(b.ctx, whereFields, nil, 0, 0)
if err != nil {
log.Error("get prover task error", "hash id", hash, "error", err)
return true
}
if len(proverTasks) >= int(b.cfg.RollerManagerConfig.SessionAttempts) {
coordinatorSessionsTimeoutTotalCounter.Inc(1)
log.Warn("proof generation prover task %s ended because reach the max attempts", hash)
for _, proverTask := range proverTasks {
if types.ProvingStatus(proverTask.ProvingStatus) == types.ProvingTaskFailed {
rollermanager.Manager.FreeTaskIDForRoller(proverTask.ProverPublicKey, hash)
}
}
transErr := b.db.Transaction(func(tx *gorm.DB) error {
switch message.ProofType(proverTasks[0].TaskType) {
case message.ProofTypeChunk:
if err := b.chunkOrm.UpdateProvingStatus(b.ctx, hash, types.ProvingTaskFailed, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
}
case message.ProofTypeBatch:
if err := b.batchOrm.UpdateProvingStatus(b.ctx, hash, types.ProvingTaskFailed, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
}
}
// update the prover task status so that the timeout checker no longer checks it.
if err := b.proverTaskOrm.UpdateAllProverTaskProvingStatusOfTaskID(b.ctx, message.ProofType(proverTasks[0].TaskType), hash, types.RollerProofInvalid, tx); err != nil {
log.Error("failed to update prover task proving_status as failed", "msg.ID", hash, "error", err)
}
return nil
})
if transErr == nil {
return false
}
}
return true
}
func (b *BaseCollector) sendTask(proveType message.ProofType, hash string, blockHashes []common.Hash, subProofs []*message.AggProof) ([]*coordinatorType.RollerStatus, error) {
sendMsg := &message.TaskMsg{
ID: hash,
Type: proveType,
BlockHashes: blockHashes,
SubProofs: subProofs,
}
var err error
var rollerStatusList []*coordinatorType.RollerStatus
for i := uint8(0); i < b.cfg.RollerManagerConfig.RollersPerSession; i++ {
rollerPubKey, rollerName, sendErr := rollermanager.Manager.SendTask(proveType, sendMsg)
if sendErr != nil {
err = sendErr
continue
}
rollermanager.Manager.UpdateMetricRollerProofsLastAssignedTimestampGauge(rollerPubKey)
rollerStatus := &coordinatorType.RollerStatus{
PublicKey: rollerPubKey,
Name: rollerName,
Status: types.RollerAssigned,
}
rollerStatusList = append(rollerStatusList, rollerStatus)
}
if err != nil {
return nil, err
}
return rollerStatusList, nil
}
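Any new task type can plug into the cron loop by implementing the Collector interface above. A hedged sketch with a hypothetical bundle collector (the name and its body are placeholders, not part of this change):
package collector

import (
	"context"
)

// BundleProofCollector is a hypothetical collector, shown only to illustrate how a new
// task type would be added; it is not part of this change.
type BundleProofCollector struct {
	BaseCollector
}

// Name returns the collector name used in log messages.
func (c *BundleProofCollector) Name() string { return "bundle_collector" }

// Collect would load one unassigned bundle task and dispatch it, mirroring the chunk and
// batch collectors: check idle rollers, check attempts, call sendTask, then persist the
// prover tasks inside a single transaction.
func (c *BundleProofCollector) Collect(ctx context.Context) error {
	return nil // placeholder body
}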

View File

@@ -0,0 +1,311 @@
package proof
import (
"context"
"errors"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
)
var (
coordinatorProofsGeneratedFailedTimeTimer = gethMetrics.NewRegisteredTimer("coordinator/proofs/generated/failed/time", metrics.ScrollRegistry)
coordinatorProofsReceivedTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/proofs/received/total", metrics.ScrollRegistry)
coordinatorProofsVerifiedSuccessTimeTimer = gethMetrics.NewRegisteredTimer("coordinator/proofs/verified/success/time", metrics.ScrollRegistry)
coordinatorProofsVerifiedFailedTimeTimer = gethMetrics.NewRegisteredTimer("coordinator/proofs/verified/failed/time", metrics.ScrollRegistry)
coordinatorSessionsFailedTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/sessions/failed/total", metrics.ScrollRegistry)
)
var (
// ErrValidatorFailureProofMsgStatusNotOk proof msg status not ok
ErrValidatorFailureProofMsgStatusNotOk = errors.New("validator failure proof msg status not ok")
// ErrValidatorFailureProverTaskEmpty no prover task found for the proof
ErrValidatorFailureProverTaskEmpty = errors.New("validator failure get none prover task for the proof")
// ErrValidatorFailureRollerInfoHasProofValid a valid proof has already been submitted
ErrValidatorFailureRollerInfoHasProofValid = errors.New("validator failure prover task info has proof valid")
)
// ZKProofReceiver receives and handles proofs submitted by rollers
type ZKProofReceiver struct {
chunkOrm *orm.Chunk
batchOrm *orm.Batch
proverTaskOrm *orm.ProverTask
db *gorm.DB
cfg *config.RollerManagerConfig
verifier *verifier.Verifier
}
// NewZKProofReceiver creates a proof receiver
func NewZKProofReceiver(cfg *config.RollerManagerConfig, db *gorm.DB) *ZKProofReceiver {
vf, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
}
return &ZKProofReceiver{
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
cfg: cfg,
db: db,
verifier: vf,
}
}
// HandleZkProof handles a ZkProof submitted by a roller.
// For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side.
func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.ProofMsg) error {
pk, _ := proofMsg.PublicKey()
rollermanager.Manager.UpdateMetricRollerProofsLastFinishedTimestampGauge(pk)
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, proofMsg.ID, pk)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
if err = m.validator(proverTask, pk, proofMsg); err != nil {
if errors.Is(err, ErrValidatorFailureProofMsgStatusNotOk) {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
}
return nil
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
// store proof content
var storeProofErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
if dbErr := m.chunkOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.Proof, proofTimeSec, tx); dbErr != nil {
return fmt.Errorf("failed to store chunk proof into db, err:%w", dbErr)
}
if dbErr := m.chunkOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskProved, tx); dbErr != nil {
return fmt.Errorf("failed to update chunk task status as proved, error:%w", dbErr)
}
return nil
})
case message.ProofTypeBatch:
storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
if dbErr := m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.Proof, proofTimeSec, tx); dbErr != nil {
return fmt.Errorf("failed to store batch proof into db, error:%w", dbErr)
}
if dbErr := m.batchOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskProved, tx); dbErr != nil {
return fmt.Errorf("failed to update batch task status as proved, error:%w", dbErr)
}
return nil
})
}
if storeProofErr != nil {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
log.Error("failed to store basic proof into db", "error", storeProofErr)
return storeProofErr
}
coordinatorProofsReceivedTotalCounter.Inc(1)
success, verifyErr := m.verifier.VerifyProof(proofMsg.Proof)
if verifyErr != nil || !success {
if verifyErr != nil {
// TODO: this is only a temp workaround for testnet, we should return err in real cases
log.Error("failed to verify zk proof", "proof id", proofMsg.ID, "roller pk", pk, "prove type",
proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
}
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
// TODO: Roller needs to be slashed if proof is invalid.
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
rollermanager.Manager.UpdateMetricRollerProofsVerifiedFailedTimeTimer(pk, proofTime)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "roller name", proverTask.ProverName,
"roller pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
return nil
}
if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg); err != nil {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg.Type)
}
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
rollermanager.Manager.UpdateMetricRollerProofsVerifiedSuccessTimeTimer(pk, proofTime)
return nil
}
func (m *ZKProofReceiver) checkAreAllChunkProofsReady(ctx context.Context, chunkHash string) error {
batchHash, err := m.chunkOrm.GetChunkBatchHash(ctx, chunkHash)
if err != nil {
return err
}
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batchHash)
if err != nil {
return err
}
if allReady {
err := m.batchOrm.UpdateChunkProofsStatusByBatchHash(ctx, batchHash, types.ChunkProofsStatusReady)
if err != nil {
return err
}
}
return nil
}
func (m *ZKProofReceiver) validator(proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
// Ensure this roller is eligible to participate in the prover task.
if types.RollerProveStatus(proverTask.ProvingStatus) == types.RollerProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the roller for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn("roller has already submitted valid proof in proof session", "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof id", proofMsg.ProofDetail.ID)
return ErrValidatorFailureRollerInfoHasProofValid
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proof id", proofMsg.ID, "roller name", proverTask.ProverName,
"roller pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)
if proofMsg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
rollermanager.Manager.UpdateMetricRollerProofsGeneratedFailedTimeTimer(pk, proofTime)
log.Info("proof generated by roller failed", "proof id", proofMsg.ID, "roller name", proverTask.ProverName,
"roller pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", proofMsg.Error)
return ErrValidatorFailureProofMsgStatusNotOk
}
return nil
}
func (m *ZKProofReceiver) proofFailure(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskFailed); err != nil {
log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
func (m *ZKProofReceiver) proofRecover(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
}
}
func (m *ZKProofReceiver) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) error {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg.Type, types.ProvingTaskVerified); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "pubKey", pubKey, "error", err)
return err
}
rollermanager.Manager.FreeTaskIDForRoller(pubKey, hash)
return nil
}
// updateProofStatus updates the chunk/batch task status and the prover task status
func (m *ZKProofReceiver) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsgType message.ProofType, status types.ProvingStatus) error {
// If the prover task failure type is ProverTaskFailureTypeTimeout, skip updating the status
// because the proof result arrived too late.
if m.checkIsTimeoutFailure(ctx, hash, proverPublicKey) {
return nil
}
var proverTaskStatus types.RollerProveStatus
switch status {
case types.ProvingTaskFailed, types.ProvingTaskUnassigned:
proverTaskStatus = types.RollerProofInvalid
case types.ProvingTaskVerified:
proverTaskStatus = types.RollerProofValid
}
err := m.db.Transaction(func(tx *gorm.DB) error {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsgType, hash, proverPublicKey, proverTaskStatus, tx); updateErr != nil {
return updateErr
}
// If the task has already been verified, a failed status must not overwrite its proving status.
if status == types.ProvingTaskFailed && m.checkIsTaskSuccess(ctx, hash, proofMsgType) {
return nil
}
switch proofMsgType {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
return err
}
case message.ProofTypeBatch:
if err := m.batchOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
return err
}
}
return nil
})
if err != nil {
return err
}
if status == types.ProvingTaskVerified && proofMsgType == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, hash); checkReadyErr != nil {
log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
return checkReadyErr
}
}
return nil
}
func (m *ZKProofReceiver) checkIsTaskSuccess(ctx context.Context, hash string, proofType message.ProofType) bool {
var provingStatus types.ProvingStatus
var err error
switch proofType {
case message.ProofTypeChunk:
provingStatus, err = m.chunkOrm.GetProvingStatusByHash(ctx, hash)
if err != nil {
return false
}
case message.ProofTypeBatch:
provingStatus, err = m.batchOrm.GetProvingStatusByHash(ctx, hash)
if err != nil {
return false
}
}
return provingStatus == types.ProvingTaskVerified
}
func (m *ZKProofReceiver) checkIsTimeoutFailure(ctx context.Context, hash, proverPublicKey string) bool {
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, hash, proverPublicKey)
if err != nil {
return false
}
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
return true
}
return false
}
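A minimal sketch of how the receiver is used by a submit-proof entry point; the wrapper function and package path are assumptions for illustration, while NewZKProofReceiver, ProofMsg.Verify and HandleZkProof come from the code above:
package proof

import (
	"context"
	"fmt"

	"gorm.io/gorm"

	"scroll-tech/common/types/message"
	"scroll-tech/coordinator/internal/config"
)

// submitProofSketch shows the expected call order for an incoming proof: check the roller's
// signature, then let the receiver validate, store and verify it.
func submitProofSketch(ctx context.Context, cfg *config.Config, db *gorm.DB, proofMsg *message.ProofMsg) error {
	receiver := NewZKProofReceiver(cfg.RollerManagerConfig, db)
	if ok, err := proofMsg.Verify(); err != nil || !ok {
		return fmt.Errorf("invalid proof signature, error:%w", err)
	}
	return receiver.HandleZkProof(ctx, proofMsg)
}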

View File

@@ -0,0 +1,79 @@
package proof
import (
"context"
"fmt"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/metrics"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/logic/rollermanager"
)
var coordinatorRollersDisconnectsTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/rollers/disconnects/total", metrics.ScrollRegistry)
// TaskWorker holds the roller task connection
type TaskWorker struct{}
// NewTaskWorker creates a task worker
func NewTaskWorker() *TaskWorker {
return &TaskWorker{}
}
// AllocTaskWorker allocates a task worker goroutine for a registered roller
func (t *TaskWorker) AllocTaskWorker(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
}
pubKey, err := authMsg.PublicKey()
if err != nil {
return &rpc.Subscription{}, fmt.Errorf("AllocTaskWorker auth msg public key error:%w", err)
}
identity := authMsg.Identity
// create or get the roller message channel
taskCh, err := rollermanager.Manager.Register(ctx, pubKey, identity)
if err != nil {
return &rpc.Subscription{}, err
}
rpcSub := notifier.CreateSubscription()
go t.worker(rpcSub, notifier, pubKey, identity, taskCh)
log.Info("roller register", "name", identity.Name, "pubKey", pubKey, "version", identity.Version)
return rpcSub, nil
}
// TODO worker add metrics
func (t *TaskWorker) worker(rpcSub *rpc.Subscription, notifier *rpc.Notifier, pubKey string, identity *message.Identity, taskCh <-chan *message.TaskMsg) {
defer func() {
if err := recover(); err != nil {
log.Error("task worker subId:%d panic for:%v", err)
}
rollermanager.Manager.FreeRoller(pubKey)
log.Info("roller unregister", "name", identity.Name, "pubKey", pubKey)
}()
for {
select {
case task := <-taskCh:
notifier.Notify(rpcSub.ID, task) //nolint
case err := <-rpcSub.Err():
coordinatorRollersDisconnectsTotalCounter.Inc(1)
log.Warn("client stopped the ws connection", "name", identity.Name, "pubkey", pubKey, "err", err)
return
case <-notifier.Closed():
return
}
}
}

View File

@@ -0,0 +1,60 @@
package rollermanager
import (
"time"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
)
type rollerMetrics struct {
rollerProofsVerifiedSuccessTimeTimer gethMetrics.Timer
rollerProofsVerifiedFailedTimeTimer gethMetrics.Timer
rollerProofsGeneratedFailedTimeTimer gethMetrics.Timer
rollerProofsLastAssignedTimestampGauge gethMetrics.Gauge
rollerProofsLastFinishedTimestampGauge gethMetrics.Gauge
}
func (r *rollerManager) UpdateMetricRollerProofsLastFinishedTimestampGauge(pk string) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsLastAssignedTimestampGauge(pk string) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
}
}
}

View File

@@ -0,0 +1,203 @@
package rollermanager
import (
"context"
"crypto/rand"
"errors"
"fmt"
"math/big"
"sync"
"time"
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/orm"
)
var (
once sync.Once
// Manager the global roller manager
Manager *rollerManager
)
// rollerNode records the information of a connected roller
type rollerNode struct {
// Roller name
Name string
// Roller type
Type message.ProofType
// Roller public key
PublicKey string
// Roller version
Version string
// task channel
taskChan chan *message.TaskMsg
// session id list which delivered to roller.
TaskIDs cmap.ConcurrentMap
// Time of message creation
registerTime time.Time
metrics *rollerMetrics
}
type rollerManager struct {
rollerPool cmap.ConcurrentMap
proverTaskOrm *orm.ProverTask
}
// InitRollerManager initializes the global roller manager
func InitRollerManager(db *gorm.DB) {
once.Do(func() {
Manager = &rollerManager{
rollerPool: cmap.New(),
proverTaskOrm: orm.NewProverTask(db),
}
})
}
// Register registers a roller with the manager by public key and identity, returning its task channel
func (r *rollerManager) Register(ctx context.Context, proverPublicKey string, identity *message.Identity) (<-chan *message.TaskMsg, error) {
node, ok := r.rollerPool.Get(proverPublicKey)
if !ok {
taskIDs, err := r.reloadRollerAssignedTasks(ctx, proverPublicKey)
if err != nil {
return nil, fmt.Errorf("register error:%w", err)
}
rMs := &rollerMetrics{
rollerProofsVerifiedSuccessTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/success/time/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsVerifiedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsGeneratedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/generated/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsLastAssignedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/assigned/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsLastFinishedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
}
node = &rollerNode{
Name: identity.Name,
Type: identity.RollerType,
Version: identity.Version,
PublicKey: proverPublicKey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
metrics: rMs,
}
r.rollerPool.Set(proverPublicKey, node)
}
roller := node.(*rollerNode)
// avoid reconnecting too frequently.
if time.Since(roller.registerTime) < 60*time.Second {
log.Warn("roller reconnect too frequently", "prover_name", identity.Name, "roller_type", identity.RollerType, "public key", proverPublicKey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time and status
roller.registerTime = time.Now()
return roller.taskChan, nil
}
func (r *rollerManager) reloadRollerAssignedTasks(ctx context.Context, proverPublicKey string) (*cmap.ConcurrentMap, error) {
var assignedProverTasks []orm.ProverTask
page := 0
limit := 100
for {
page++
whereFields := make(map[string]interface{})
whereFields["proving_status"] = int16(types.RollerAssigned)
orderBy := []string{"id asc"}
offset := (page - 1) * limit
batchAssignedProverTasks, err := r.proverTaskOrm.GetProverTasks(ctx, whereFields, orderBy, offset, limit)
if err != nil {
log.Warn("reloadRollerAssignedTasks get all assigned failure", "error", err)
return nil, fmt.Errorf("reloadRollerAssignedTasks error:%w", err)
}
assignedProverTasks = append(assignedProverTasks, batchAssignedProverTasks...)
// stop once a page returns fewer rows than the limit; it is the last page.
if len(batchAssignedProverTasks) < limit {
break
}
}
taskIDs := cmap.New()
for _, assignedProverTask := range assignedProverTasks {
if assignedProverTask.ProverPublicKey == proverPublicKey && assignedProverTask.ProvingStatus == int16(types.RollerAssigned) {
taskIDs.Set(assignedProverTask.TaskID, struct{}{})
}
}
return &taskIDs, nil
}
// SendTask sends a task message to an idle roller and returns the roller's public key and name
func (r *rollerManager) SendTask(rollerType message.ProofType, msg *message.TaskMsg) (string, string, error) {
tmpRoller := r.selectRoller(rollerType)
if tmpRoller == nil {
return "", "", errors.New("selectRoller returns nil")
}
select {
case tmpRoller.taskChan <- msg:
tmpRoller.TaskIDs.Set(msg.ID, struct{}{})
default:
err := fmt.Errorf("roller channel is full, rollerName:%s, publicKey:%s", tmpRoller.Name, tmpRoller.PublicKey)
return "", "", err
}
r.UpdateMetricRollerProofsLastAssignedTimestampGauge(tmpRoller.PublicKey)
return tmpRoller.PublicKey, tmpRoller.Name, nil
}
// ExistTaskIDForRoller checks whether the task has been assigned to the roller
func (r *rollerManager) ExistTaskIDForRoller(pk string, id string) bool {
node, ok := r.rollerPool.Get(pk)
if !ok {
return false
}
roller := node.(*rollerNode)
return roller.TaskIDs.Has(id)
}
// FreeRoller removes the roller with the given public key from the pool
func (r *rollerManager) FreeRoller(pk string) {
r.rollerPool.Pop(pk)
}
// FreeTaskIDForRoller releases a task from the roller with the given public key
func (r *rollerManager) FreeTaskIDForRoller(pk string, id string) {
if node, ok := r.rollerPool.Get(pk); ok {
roller := node.(*rollerNode)
roller.TaskIDs.Pop(id)
}
}
// GetNumberOfIdleRollers returns the count of idle rollers of the given type.
func (r *rollerManager) GetNumberOfIdleRollers(rollerType message.ProofType) (count int) {
for item := range r.rollerPool.IterBuffered() {
roller := item.Val.(*rollerNode)
if roller.TaskIDs.Count() == 0 && roller.Type == rollerType {
count++
}
}
return count
}
func (r *rollerManager) selectRoller(rollerType message.ProofType) *rollerNode {
pubkeys := r.rollerPool.Keys()
for len(pubkeys) > 0 {
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
if val, ok := r.rollerPool.Get(pubkeys[idx.Int64()]); ok {
rn := val.(*rollerNode)
if rn.TaskIDs.Count() == 0 && rn.Type == rollerType {
return rn
}
}
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
}
return nil
}
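A condensed sketch of the register-then-send flow as the coordinator uses it; the wrapper function is illustrative, while InitRollerManager, Register, GetNumberOfIdleRollers and SendTask come from the code above:
package rollermanager

import (
	"context"

	"gorm.io/gorm"

	"scroll-tech/common/types/message"
)

// dispatchSketch assumes the caller already holds the roller's public key, its identity
// message and a task to dispatch.
func dispatchSketch(ctx context.Context, db *gorm.DB, pubKey string, identity *message.Identity, task *message.TaskMsg) error {
	InitRollerManager(db) // guarded by sync.Once, so repeated calls are harmless

	// Register returns the channel that TaskWorker forwards to the roller's subscription.
	if _, err := Manager.Register(ctx, pubKey, identity); err != nil {
		return err
	}

	// SendTask picks a random idle roller of the requested type and enqueues the task.
	if Manager.GetNumberOfIdleRollers(task.Type) == 0 {
		return nil // nothing to do until a matching roller is idle
	}
	_, _, err := Manager.SendTask(task.Type, task)
	return err
}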

View File

@@ -11,8 +11,7 @@ import (
const InvalidTestProof = "this is a invalid proof"
// Verifier represents a mock halo2 verifier.
type Verifier struct {
}
type Verifier struct{}
// NewVerifier Sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {

View File

@@ -1,6 +1,6 @@
//go:build ffi
package verifier_test
package verifier
import (
"encoding/json"
@@ -9,12 +9,11 @@ import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/verifier"
"github.com/stretchr/testify/assert"
)
var (
@@ -30,7 +29,7 @@ func TestFFI(t *testing.T) {
ParamsPath: *paramsPath,
AggVkPath: *aggVkPath,
}
v, err := verifier.NewVerifier(cfg)
v, err := NewVerifier(cfg)
as.NoError(err)
f, err := os.Open(*proofPath)

View File

@@ -203,7 +203,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
// UpdateChunkProofsStatusByBatchHash updates the status of chunk_proofs_status field for a given batch hash.
// The function will set the chunk_proofs_status to the status provided.
func (o *Chunk) UpdateChunkProofsStatusByBatchHash(ctx context.Context, batchHash string, status types.ChunkProofsStatus) error {
func (o *Batch) UpdateChunkProofsStatusByBatchHash(ctx context.Context, batchHash string, status types.ChunkProofsStatus) error {
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash = ?", batchHash)
@@ -215,7 +215,11 @@ func (o *Chunk) UpdateChunkProofsStatusByBatchHash(ctx context.Context, batchHas
}
// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus) error {
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
@@ -228,7 +232,7 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["proved_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
@@ -239,7 +243,11 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
}
// UpdateProofByHash updates the batch proof by hash.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
@@ -249,7 +257,7 @@ func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *messa
updateFields["proof"] = proofBytes
updateFields["proof_time_sec"] = proofTimeSec
db := o.db.WithContext(ctx)
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)

View File

@@ -245,7 +245,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk) (*Chunk, er
}
// UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus) error {
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
@@ -257,8 +257,11 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
@@ -269,7 +272,11 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
}
// UpdateProofByHash updates the chunk proof by hash.
func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
@@ -279,7 +286,7 @@ func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *messa
updateFields["proof"] = proofBytes
updateFields["proof_time_sec"] = proofTimeSec
db := o.db.WithContext(ctx)
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)

View File

@@ -12,6 +12,7 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
)
@@ -71,6 +72,7 @@ func TestProverTaskOrm(t *testing.T) {
ProverPublicKey: "0",
ProvingStatus: int16(types.RollerAssigned),
Reward: decimal.NewFromBigInt(reward, 0),
AssignedAt: utils.NowUTC(),
}
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
@@ -86,6 +88,7 @@ func TestProverTaskOrm(t *testing.T) {
assert.Equal(t, resultReward.String(), "18446744073709551616")
proverTask.ProvingStatus = int16(types.RollerProofValid)
proverTask.AssignedAt = utils.NowUTC()
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)
assert.NoError(t, err)
proverTasks, err = proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{"test-hash"})
@@ -108,6 +111,7 @@ func TestProverTaskOrmUint256(t *testing.T) {
ProverPublicKey: "0",
ProvingStatus: int16(types.RollerAssigned),
Reward: decimal.NewFromBigInt(rewardUint256, 0),
AssignedAt: utils.NowUTC(),
}
err = proverTaskOrm.SetProverTask(context.Background(), &proverTask)

View File

@@ -32,6 +32,7 @@ type ProverTask struct {
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
Reward decimal.Decimal `json:"reward" gorm:"column:reward;default:0;type:decimal(78)"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
AssignedAt time.Time `json:"assigned_at" gorm:"assigned_at"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
@@ -49,6 +50,34 @@ func (*ProverTask) TableName() string {
return "prover_task"
}
// GetProverTasks gets prover tasks filtered by the given fields, with optional ordering, offset, and limit
func (o *ProverTask) GetProverTasks(ctx context.Context, fields map[string]interface{}, orderByList []string, offset, limit int) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
for k, v := range fields {
db = db.Where(k, v)
}
for _, orderBy := range orderByList {
db = db.Order(orderBy)
}
if limit != 0 {
db = db.Limit(limit)
}
if offset != 0 {
db = db.Offset(offset)
}
var proverTasks []ProverTask
if err := db.Find(&proverTasks).Error; err != nil {
return nil, err
}
return proverTasks, nil
}
// GetProverTasksByHashes retrieves the ProverTask records associated with the specified hashes.
// The returned prover task objects are sorted in ascending order by their ids.
func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string) ([]*ProverTask, error) {
@@ -68,13 +97,46 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string
return proverTasks, nil
}
// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask) error {
// GetProverTaskByTaskIDAndPubKey gets the prover task by task ID and prover public key
func (o *ProverTask) GetProverTaskByTaskIDAndPubKey(ctx context.Context, taskID, proverPublicKey string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id", taskID).Where("prover_public_key", proverPublicKey)
var proverTask ProverTask
err := db.First(&proverTask).Error
if err != nil {
return nil, fmt.Errorf("ProverTask.GetProverTaskByTaskIDAndPubKey err:%w, taskID:%s, pubukey:%s", err, taskID, proverPublicKey)
}
return &proverTask, nil
}
// GetAssignedProverTasks gets the assigned prover tasks, up to the given limit
func (o *ProverTask) GetAssignedProverTasks(ctx context.Context, limit int) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("proving_status", int(types.RollerAssigned))
db = db.Limit(limit)
var proverTasks []ProverTask
err := db.Find(&proverTasks).Error
if err != nil {
return nil, fmt.Errorf("ProverTask.GetAssignedProverTasks error:%w", err)
}
return proverTasks, nil
}
// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
db := o.db.WithContext(ctx)
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
DoUpdates: clause.AssignmentColumns([]string{"proving_status"}),
DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
})
if err := db.Create(&proverTask).Error; err != nil {
@@ -84,13 +146,63 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask)
}
// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus) error {
db := o.db.WithContext(ctx)
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", proofType, taskID, pk)
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", int(proofType), taskID, pk)
if err := db.Update("proving_status", status).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskProvingStatus error: %w, proof type: %v, taskID: %v, prover public key: %v, status: %v", err, proofType.String(), taskID, pk, status.String())
}
return nil
}
// UpdateAllProverTaskProvingStatusOfTaskID updates all the proving_status of a specific task id.
func (o *ProverTask) UpdateAllProverTaskProvingStatusOfTaskID(ctx context.Context, proofType message.ProofType, taskID string, status types.RollerProveStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ?", int(proofType), taskID)
if err := db.Update("proving_status", status).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateAllProverTaskProvingStatusOfTaskID error: %w, proof type: %v, taskID: %v, status: %v", err, proofType.String(), taskID, status.String())
}
return nil
}
// UpdateProverTaskFailureType updates the failure type of a specific prover task
func (o *ProverTask) UpdateProverTaskFailureType(ctx context.Context, proofType message.ProofType, taskID string, pk string, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id", taskID).Where("prover_public_key", pk).Where("task_type", int(proofType))
if err := db.Update("failure_type", int(failureType)).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskFailureType error: %w, proof type: %v, taskID: %v, prover public key: %v, failure type: %v", err, proofType.String(), taskID, pk, failureType.String())
}
return nil
}
// UpdateAllProverTaskFailureTypeOfTaskID updates the failure type of all prover tasks with the given task ID
func (o *ProverTask) UpdateAllProverTaskFailureTypeOfTaskID(ctx context.Context, proofType message.ProofType, taskID string, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id", taskID).Where("task_type", int(proofType))
if err := db.Update("failure_type", int(failureType)).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateAllProverTaskFailureTypeOfTaskID error: %w, proof type: %v, taskID: %v, failure type: %v", err, proofType.String(), taskID, failureType.String())
}
return nil
}
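The generic GetProverTasks query takes equality filters, an order-by list and paging parameters. A short function-level sketch of how it is meant to be called (the wrapper is illustrative; the filter values mirror the roller manager's reload of assigned chunk tasks):
// assignedChunkTasksSketch returns the first page of assigned chunk prover tasks.
func assignedChunkTasksSketch(ctx context.Context, proverTaskOrm *ProverTask) ([]ProverTask, error) {
	whereFields := map[string]interface{}{
		"proving_status": int16(types.RollerAssigned),
		"task_type":      int16(message.ProofTypeChunk),
	}
	orderBy := []string{"id asc"}
	// offset 0 and limit 100 return the first 100 matching rows
	return proverTaskOrm.GetProverTasks(ctx, whereFields, orderBy, 0, 100)
}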

View File

@@ -0,0 +1,21 @@
package types
import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
)
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used to recover types.Transactions; the from field of types.TransactionData is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}
// BatchInfo contains the BlockBatch's main info
type BatchInfo struct {
Index uint64 `json:"index"`
Hash string `json:"hash"`
StateRoot string `json:"state_root"`
}

View File

@@ -0,0 +1,21 @@
package types
import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
// RollersInfo is assigned rollers info of a task (session)
type RollersInfo struct {
ID string `json:"id"`
RollerStatusList []*RollerStatus `json:"rollers"`
StartTimestamp int64 `json:"start_timestamp"`
ProveType message.ProofType `json:"prove_type,omitempty"`
}
// RollerStatus is the roller name and roller prove status
type RollerStatus struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
Status types.RollerProveStatus `json:"status"`
}

View File

@@ -1,853 +0,0 @@
package coordinator
import (
"context"
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
cmap "github.com/orcaman/concurrent-map"
"github.com/patrickmn/go-cache"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"golang.org/x/exp/rand"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils/workerpool"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/verifier"
)
var (
// proofs
coordinatorProofsReceivedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/proofs/received/total", metrics.ScrollRegistry)
coordinatorProofsVerifiedSuccessTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/verified/success/time", metrics.ScrollRegistry)
coordinatorProofsVerifiedFailedTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/verified/failed/time", metrics.ScrollRegistry)
coordinatorProofsGeneratedFailedTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/generated/failed/time", metrics.ScrollRegistry)
// sessions
coordinatorSessionsSuccessTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/success/total", metrics.ScrollRegistry)
coordinatorSessionsTimeoutTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)
coordinatorSessionsFailedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/failed/total", metrics.ScrollRegistry)
coordinatorSessionsActiveNumberGauge = geth_metrics.NewRegisteredCounter("coordinator/sessions/active/number", metrics.ScrollRegistry)
)
const (
proofAndPkBufferSize = 10
)
type rollerProofStatus struct {
id string
typ message.ProofType
pk string
status types.RollerProveStatus
}
// Contains all the information on an ongoing proof generation session.
type session struct {
taskID string
proverTasks []*orm.ProverTask
// finish channel is used to pass the public key of the rollers who finished proving process.
finishChan chan rollerProofStatus
}
// Manager is responsible for maintaining connections with active rollers,
// sending the challenges, and receiving proofs. It also regulates the reward
// distribution. All read and write logic and connection handling happens through
// a modular websocket server, contained within the Manager. Incoming messages are
// then passed to the Manager where the actual handling logic resides.
type Manager struct {
// The manager context.
ctx context.Context
// The roller manager configuration.
cfg *config.RollerManagerConfig
// The indicator whether the backend is running or not.
running int32
// A mutex guarding the boolean below.
mu sync.RWMutex
// A map containing all active proof generation sessions.
sessions map[string]*session
// A map containing proof failed or verify failed proof.
rollerPool cmap.ConcurrentMap
failedSessionInfos map[string]*SessionInfo
// A direct connection to the Halo2 verifier, used to verify
// incoming proofs.
verifier *verifier.Verifier
// orm interface
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
proverTaskOrm *orm.ProverTask
// Token cache
tokenCache *cache.Cache
// A mutex guarding registration
registerMu sync.RWMutex
// Verifier worker pool
verifierWorkerPool *workerpool.WorkerPool
}
// New returns a new instance of Manager. The instance will not be fully prepared,
// and still needs to be finalized and ran by calling `manager.Start`.
func New(ctx context.Context, cfg *config.RollerManagerConfig, db *gorm.DB) (*Manager, error) {
v, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
return nil, err
}
log.Info("Start coordinator successfully.")
return &Manager{
ctx: ctx,
cfg: cfg,
rollerPool: cmap.New(),
sessions: make(map[string]*session),
failedSessionInfos: make(map[string]*SessionInfo),
verifier: v,
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
verifierWorkerPool: workerpool.NewWorkerPool(cfg.MaxVerifierWorkers),
}, nil
}
// Start the Manager module.
func (m *Manager) Start() error {
if m.isRunning() {
return nil
}
m.verifierWorkerPool.Run()
m.restorePrevSessions()
atomic.StoreInt32(&m.running, 1)
go m.Loop()
return nil
}
// Stop the Manager module, for a graceful shutdown.
func (m *Manager) Stop() {
if !m.isRunning() {
return
}
m.verifierWorkerPool.Stop()
atomic.StoreInt32(&m.running, 0)
}
// isRunning returns whether the manager is running.
func (m *Manager) isRunning() bool {
return atomic.LoadInt32(&m.running) == 1
}
// Loop keeps the manager running.
func (m *Manager) Loop() {
var (
tick = time.NewTicker(time.Second * 2)
chunkTasks []*orm.Chunk
batchTasks []*orm.Batch
)
defer tick.Stop()
for {
select {
case <-tick.C:
// load and send batch tasks
if len(batchTasks) == 0 {
var err error
batchTasks, err = m.batchOrm.GetUnassignedBatches(m.ctx, m.GetNumberOfIdleRollers(message.ProofTypeBatch))
if err != nil {
log.Error("failed to get unassigned batch proving tasks", "error", err)
continue
}
}
// Select batch type roller and send message
for len(batchTasks) > 0 && m.StartBatchProofGenerationSession(batchTasks[0], nil) {
batchTasks = batchTasks[1:]
}
// load and send chunk tasks
if len(chunkTasks) == 0 {
// TODO: add cache
var err error
chunkTasks, err = m.chunkOrm.GetUnassignedChunks(m.ctx, m.GetNumberOfIdleRollers(message.ProofTypeChunk))
if err != nil {
log.Error("failed to get unassigned chunk proving tasks", "error", err)
continue
}
}
// Select chunk type roller and send message
for len(chunkTasks) > 0 && m.StartChunkProofGenerationSession(chunkTasks[0], nil) {
chunkTasks = chunkTasks[1:]
}
case <-m.ctx.Done():
if m.ctx.Err() != nil {
log.Error(
"manager context canceled with error",
"error", m.ctx.Err(),
)
}
return
}
}
}
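// restorePrevSessions recovers proof generation sessions for batch and chunk tasks that were already assigned before a restart, so their proofs can still be collected.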
func (m *Manager) restorePrevSessions() {
m.mu.Lock()
defer m.mu.Unlock()
var hashes []string
// load assigned batch tasks from db
batchTasks, err := m.batchOrm.GetAssignedBatches(m.ctx)
if err != nil {
log.Error("failed to load assigned batch tasks from db", "error", err)
return
}
for _, batchTask := range batchTasks {
hashes = append(hashes, batchTask.Hash)
}
// load assigned chunk tasks from db
chunkTasks, err := m.chunkOrm.GetAssignedChunks(m.ctx)
if err != nil {
log.Error("failed to get assigned batch batchHashes from db", "error", err)
return
}
for _, chunkTask := range chunkTasks {
hashes = append(hashes, chunkTask.Hash)
}
prevSessions, err := m.proverTaskOrm.GetProverTasksByHashes(m.ctx, hashes)
if err != nil {
log.Error("failed to recover roller session info from db", "error", err)
return
}
proverTasksMaps := make(map[string][]*orm.ProverTask)
for _, v := range prevSessions {
log.Info("restore roller info for session", "session start time", v.CreatedAt, "session id", v.TaskID, "roller name",
v.ProverName, "proof type", v.TaskType, "public key", v.ProverPublicKey, "proof status", v.ProvingStatus)
proverTasksMaps[v.TaskID] = append(proverTasksMaps[v.TaskID], v)
}
for taskID, proverTasks := range proverTasksMaps {
sess := &session{
taskID: taskID,
proverTasks: proverTasks,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.sessions[taskID] = sess
go m.CollectProofs(sess)
}
}
// handleZkProof handles a zk proof submitted by a roller.
// For now, only proving/verifying errors lead to setting the status as skipped;
// db/unmarshal errors do not, because they are errors on the business-logic side.
func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
var dbErr error
var success bool
// Assess if the proof generation session for the given ID is still active.
// We hold the read lock until the end of the function so that there is no
// potential race for channel deletion.
m.mu.RLock()
defer m.mu.RUnlock()
sess, ok := m.sessions[msg.ID]
if !ok {
return fmt.Errorf("proof generation session for id %v does not existID", msg.ID)
}
var proverTask *orm.ProverTask
for _, si := range sess.proverTasks {
// get the send session info of this proof msg
if si.TaskID == msg.ID && si.ProverPublicKey == pk {
proverTask = si
}
}
if proverTask == nil {
return fmt.Errorf("proof generation session for id %v pk:%s does not existID", msg.ID, pk)
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
// Ensure this roller is eligible to participate in the prover task.
if types.RollerProveStatus(proverTask.ProvingStatus) == types.RollerProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the roller for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn(
"roller has already submitted valid proof in proof session",
"roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey,
"proof type", proverTask.TaskType,
"proof id", msg.ID,
)
return nil
}
log.Info("handling zk proof", "proof id", msg.ID, "roller name", proverTask.ProverName, "roller pk",
proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof time", proofTimeSec)
defer func() {
// TODO: maybe we should use db tx for the whole process?
// Roll back current proof's status.
if dbErr != nil {
if msg.Type == message.ProofTypeChunk {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset chunk task status as Unassigned", "msg.ID", msg.ID)
}
}
if msg.Type == message.ProofTypeBatch {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset batch task status as Unassigned", "msg.ID", msg.ID)
}
}
}
// set proof status
status := types.RollerProofInvalid
if success && dbErr == nil {
status = types.RollerProofValid
}
// notify the session that the roller finishes the proving process
sess.finishChan <- rollerProofStatus{msg.ID, msg.Type, pk, status}
}()
if msg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
m.updateMetricRollerProofsGeneratedFailedTimeTimer(proverTask.ProverPublicKey, proofTime)
log.Info(
"proof generated by roller failed",
"proof id", msg.ID,
"roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey,
"proof type", msg.Type,
"proof time", proofTimeSec,
"error", msg.Error,
)
return nil
}
// store proof content
if msg.Type == message.ProofTypeChunk {
if dbErr = m.chunkOrm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
log.Error("failed to store chunk proof into db", "error", dbErr)
return dbErr
}
if dbErr = m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskProved); dbErr != nil {
log.Error("failed to update chunk task status as proved", "error", dbErr)
return dbErr
}
}
if msg.Type == message.ProofTypeBatch {
if dbErr = m.batchOrm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
log.Error("failed to store batch proof into db", "error", dbErr)
return dbErr
}
if dbErr = m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskProved); dbErr != nil {
log.Error("failed to update batch task status as proved", "error", dbErr)
return dbErr
}
}
coordinatorProofsReceivedTotalCounter.Inc(1)
var verifyErr error
// TODO: wrap both chunk verifier and batch verifier
success, verifyErr = m.verifyProof(msg.Proof)
if verifyErr != nil {
// TODO: this is only a temp workaround for testnet, we should return err in real cases
success = false
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
// TODO: Roller needs to be slashed if proof is invalid.
}
if success {
if msg.Type == message.ProofTypeChunk {
if dbErr = m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update chunk proving_status",
"msg.ID", msg.ID,
"status", types.ProvingTaskVerified,
"error", dbErr)
return dbErr
}
if err := m.checkAreAllChunkProofsReady(msg.ID); err != nil {
log.Error("failed to check are all chunk proofs ready", "error", err)
return err
}
}
if msg.Type == message.ProofTypeBatch {
if dbErr = m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update batch proving_status",
"msg.ID", msg.ID,
"status", types.ProvingTaskVerified,
"error", dbErr)
return dbErr
}
}
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(proverTask.ProverPublicKey, proofTime)
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec)
} else {
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedFailedTimeTimer(proverTask.ProverPublicKey, proofTime)
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
}
return nil
}
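// checkAreAllChunkProofsReady checks whether every chunk of the batch containing the given chunk has a verified proof, and if so marks the batch's chunk proofs status as ready.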
func (m *Manager) checkAreAllChunkProofsReady(chunkHash string) error {
batchHash, err := m.chunkOrm.GetChunkBatchHash(m.ctx, chunkHash)
if err != nil {
return err
}
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(m.ctx, batchHash)
if err != nil {
return err
}
if allReady {
err := m.chunkOrm.UpdateChunkProofsStatusByBatchHash(m.ctx, batchHash, types.ChunkProofsStatusReady)
if err != nil {
return err
}
}
return nil
}
// checkAttemptsExceeded uses the count of prover task records to check whether the session has exceeded the maximum number of attempts.
func (m *Manager) checkAttemptsExceeded(hash string) bool {
proverTasks, err := m.proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{hash})
if err != nil {
log.Error("get session info error", "hash id", hash, "error", err)
return true
}
if len(proverTasks) >= int(m.cfg.SessionAttempts) {
return true
}
return false
}
// CollectProofs collects proofs corresponding to a proof generation session.
func (m *Manager) CollectProofs(sess *session) {
coordinatorSessionsActiveNumberGauge.Inc(1)
defer coordinatorSessionsActiveNumberGauge.Dec(1)
for {
select {
// Executes after the collection timeout configured in config.json; all rollers are considered to have failed.
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
if !m.checkAttemptsExceeded(sess.taskID) {
var success bool
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeBatch {
success = m.StartBatchProofGenerationSession(nil, sess)
} else if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeChunk {
success = m.StartChunkProofGenerationSession(nil, sess)
}
if success {
m.mu.Lock()
for _, v := range sess.proverTasks {
m.freeTaskIDForRoller(v.ProverPublicKey, v.TaskID)
}
m.mu.Unlock()
log.Info("Retrying session", "session id:", sess.taskID)
return
}
}
// record failed session.
errMsg := "proof generation session ended without receiving any valid proofs"
m.addFailedSession(sess, errMsg)
log.Warn(errMsg, "session id", sess.taskID)
// Set status as skipped.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeChunk {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset chunk task_status as Unassigned", "task id", sess.taskID, "err", err)
}
}
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeBatch {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset batch task_status as Unassigned", "task id", sess.taskID, "err", err)
}
}
m.mu.Lock()
for _, v := range sess.proverTasks {
m.freeTaskIDForRoller(v.ProverPublicKey, v.TaskID)
}
delete(m.sessions, sess.taskID)
m.mu.Unlock()
coordinatorSessionsTimeoutTotalCounter.Inc(1)
return
// Executes when one of the rollers finishes sending its proof; returns early once all rollers have sent results.
case ret := <-sess.finishChan:
m.mu.Lock()
for idx := range sess.proverTasks {
if sess.proverTasks[idx].ProverPublicKey == ret.pk {
sess.proverTasks[idx].ProvingStatus = int16(ret.status)
}
}
if sess.isSessionFailed() {
if ret.typ == message.ProofTypeChunk {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
if ret.typ == message.ProofTypeBatch {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
if err := m.proverTaskOrm.UpdateProverTaskProvingStatus(m.ctx, ret.typ, ret.id, ret.pk, ret.status); err != nil {
log.Error("failed to update session info proving status",
"proof type", ret.typ, "task id", ret.id, "pk", ret.pk, "status", ret.status, "error", err)
}
// Check if all rollers have finished their tasks and collect the public keys of rollers with valid results.
finished, validRollers := sess.isRollersFinished()
// When all rollers have finished submitting their tasks, select a winner among the rollers with valid proofs, then return and terminate the loop.
if finished && len(validRollers) > 0 {
// Select a random index into the valid rollers slice.
randIndex := rand.Int63n(int64(len(validRollers)))
_ = validRollers[randIndex]
// TODO: reward winner
for _, proverTask := range sess.proverTasks {
m.freeTaskIDForRoller(proverTask.ProverPublicKey, proverTask.TaskID)
delete(m.sessions, proverTask.TaskID)
}
m.mu.Unlock()
coordinatorSessionsSuccessTotalCounter.Inc(1)
return
}
m.mu.Unlock()
}
}
}
// isRollersFinished checks whether all rollers have finished submitting proofs and records which rollers produced a valid proof.
// It returns false while any roller is still proving; otherwise it returns true together with the public keys of the rollers whose proofs were valid.
func (s *session) isRollersFinished() (bool, []string) {
var validRollers []string
for _, sessionInfo := range s.proverTasks {
if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofValid {
validRollers = append(validRollers, sessionInfo.ProverPublicKey)
continue
}
if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofInvalid {
continue
}
// Some rollers are still proving.
return false, nil
}
return true, validRollers
}
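// isSessionFailed reports whether every roller in the session has ended up with an invalid proof.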
func (s *session) isSessionFailed() bool {
for _, sessionInfo := range s.proverTasks {
if types.RollerProveStatus(sessionInfo.ProvingStatus) != types.RollerProofInvalid {
return false
}
}
return true
}
// APIs collect API services.
func (m *Manager) APIs() []rpc.API {
return []rpc.API{
{
Namespace: "roller",
Service: RollerAPI(m),
Public: true,
},
{
Namespace: "debug",
Public: true,
Service: RollerDebugAPI(m),
},
}
}
// StartChunkProofGenerationSession starts a chunk proof generation session
func (m *Manager) StartChunkProofGenerationSession(task *orm.Chunk, prevSession *session) (success bool) {
var taskID string
if task != nil {
taskID = task.Hash
} else {
taskID = prevSession.taskID
}
if m.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
log.Warn("no idle chunk roller when starting proof generation session", "id", taskID)
return false
}
log.Info("start chunk proof generation session", "id", taskID)
defer func() {
if !success {
if task != nil {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
}
} else {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
}
}
}
}()
// Get block hashes.
wrappedBlocks, err := m.l2BlockOrm.GetL2BlocksByChunkHash(m.ctx, taskID)
if err != nil {
log.Error(
"Failed to fetch wrapped blocks",
"batch hash", taskID,
"error", err,
)
return false
}
blockHashes := make([]common.Hash, len(wrappedBlocks))
for i, wrappedBlock := range wrappedBlocks {
blockHashes[i] = wrappedBlock.Header.Hash()
}
// Dispatch task to chunk rollers.
var proverTasks []*orm.ProverTask
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.ProofTypeChunk)
if roller == nil {
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", taskID, "name", roller.Name, "public key", roller.PublicKey)
// send chunk task to roller
if !roller.sendTask(&message.TaskMsg{ID: taskID, Type: message.ProofTypeChunk, BlockHashes: blockHashes}) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
continue
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
proverTask := orm.ProverTask{
TaskID: taskID,
ProverPublicKey: roller.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: roller.Name,
ProvingStatus: int16(types.RollerAssigned),
FailureType: int16(types.RollerFailureTypeUndefined),
CreatedAt: time.Now(), // Used in proverTasks, should be explicitly assigned here.
}
// Store prover task info.
if err = m.proverTaskOrm.SetProverTask(m.ctx, &proverTask); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}
proverTasks = append(proverTasks, &proverTask)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.ProofTypeChunk, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", proverTask.ProvingStatus)
}
// No roller assigned.
if len(proverTasks) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle chunk rollers", m.GetNumberOfIdleRollers(message.ProofTypeChunk))
return false
}
// Update session proving status as assigned.
if err = m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", taskID, "err", err)
return false
}
// Create a proof generation session.
sess := &session{
taskID: taskID,
proverTasks: proverTasks,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.mu.Lock()
m.sessions[taskID] = sess
m.mu.Unlock()
go m.CollectProofs(sess)
return true
}
// StartBatchProofGenerationSession starts a batch proof generation session
func (m *Manager) StartBatchProofGenerationSession(task *orm.Batch, prevSession *session) (success bool) {
var taskID string
if task != nil {
taskID = task.Hash
} else {
taskID = prevSession.taskID
}
if m.GetNumberOfIdleRollers(message.ProofTypeBatch) == 0 {
log.Warn("no idle common roller when starting proof generation session", "id", taskID)
return false
}
log.Info("start batch proof generation session", "id", taskID)
defer func() {
if !success {
if task != nil {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
}
} else {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
}
}
}
}()
// get chunk proofs from db
chunkProofs, err := m.chunkOrm.GetProofsByBatchHash(m.ctx, taskID)
if err != nil {
log.Error("failed to get chunk proofs for batch task", "session id", taskID, "error", err)
return false
}
// Dispatch task to batch rollers.
var proverTasks []*orm.ProverTask
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.ProofTypeBatch)
if roller == nil {
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", taskID, "name", roller.Name, "type", roller.Type, "public key", roller.PublicKey)
// send batch task to roller
if !roller.sendTask(&message.TaskMsg{
ID: taskID,
Type: message.ProofTypeBatch,
SubProofs: chunkProofs,
}) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
continue
}
proverTask := orm.ProverTask{
TaskID: taskID,
ProverPublicKey: roller.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: roller.Name,
ProvingStatus: int16(types.RollerAssigned),
FailureType: int16(types.RollerFailureTypeUndefined),
CreatedAt: time.Now(), // Used in proverTasks, should be explicitly assigned here.
}
// Store session info.
if err = m.proverTaskOrm.SetProverTask(context.Background(), &proverTask); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
proverTasks = append(proverTasks, &proverTask)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.ProofTypeBatch, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", proverTask.ProvingStatus)
}
// No roller assigned.
if len(proverTasks) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle batch rollers", m.GetNumberOfIdleRollers(message.ProofTypeBatch))
return false
}
// Update session proving status as assigned.
if err = m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", taskID, "err", err)
return false
}
// Create a proof generation session.
sess := &session{
taskID: taskID,
proverTasks: proverTasks,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.mu.Lock()
m.sessions[taskID] = sess
m.mu.Unlock()
go m.CollectProofs(sess)
return true
}
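// addFailedSession records the given session as failed together with an error message, for later inspection.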
func (m *Manager) addFailedSession(sess *session, errMsg string) {
m.mu.Lock()
defer m.mu.Unlock()
m.failedSessionInfos[sess.taskID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
}
// VerifyToken verifies that the roller's public key has a matching, unexpired token.
func (m *Manager) VerifyToken(authMsg *message.AuthMsg) (bool, error) {
pubkey, _ := authMsg.PublicKey()
// Get returns false if the token has expired or was never issued.
if token, ok := m.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
return false, fmt.Errorf("failed to find corresponding token. roller name: %s. roller pk: %s", authMsg.Identity.Name, pubkey)
}
return true, nil
}
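// addVerifyTask submits the proof to the verifier worker pool and returns a channel on which the verification result will be delivered.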
func (m *Manager) addVerifyTask(proof *message.AggProof) chan verifyResult {
c := make(chan verifyResult, 1)
m.verifierWorkerPool.AddTask(func() {
result, err := m.verifier.VerifyProof(proof)
c <- verifyResult{result, err}
})
return c
}
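// verifyProof verifies the given proof through the verifier worker pool and blocks until the result is available.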
func (m *Manager) verifyProof(proof *message.AggProof) (bool, error) {
if !m.isRunning() {
return false, errors.New("coordinator has stopped before verification")
}
verifyResultChan := m.addVerifyTask(proof)
result := <-verifyResultChan
return result.result, result.err
}
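// verifyResult bundles the verification outcome with any error returned by the verifier.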
type verifyResult struct {
result bool
err error
}

View File

@@ -1,60 +0,0 @@
package coordinator
import (
"time"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
)
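// rollerMetrics holds the per-roller metric instruments, registered by the roller's public key.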
type rollerMetrics struct {
rollerProofsVerifiedSuccessTimeTimer geth_metrics.Timer
rollerProofsVerifiedFailedTimeTimer geth_metrics.Timer
rollerProofsGeneratedFailedTimeTimer geth_metrics.Timer
rollerProofsLastAssignedTimestampGauge geth_metrics.Gauge
rollerProofsLastFinishedTimestampGauge geth_metrics.Gauge
}
func (m *Manager) updateMetricRollerProofsLastFinishedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (m *Manager) updateMetricRollerProofsLastAssignedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (m *Manager) updateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
}
}
}
func (m *Manager) updateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
}
}
}
func (m *Manager) updateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
}
}
}

View File

@@ -1,144 +0,0 @@
package coordinator
import (
"crypto/rand"
"fmt"
"math/big"
"time"
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
// rollerNode records roller status and sends tasks to the connected roller.
type rollerNode struct {
// Roller name
Name string
// Roller type
Type message.ProofType
// Roller public key
PublicKey string
// Roller version
Version string
// task channel
taskChan chan *message.TaskMsg
// Session IDs of tasks that have been delivered to the roller.
TaskIDs cmap.ConcurrentMap
// Time of the roller's most recent registration.
registerTime time.Time
metrics *rollerMetrics
}
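// sendTask attempts to deliver a task to the roller's task channel without blocking; it returns false if the channel is full.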
func (r *rollerNode) sendTask(msg *message.TaskMsg) bool {
select {
case r.taskChan <- msg:
r.TaskIDs.Set(msg.ID, struct{}{})
default:
log.Warn("roller channel is full", "roller name", r.Name, "public key", r.PublicKey)
return false
}
return true
}
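// reloadRollerAssignedTasks rebuilds the set of task IDs still assigned to the given roller from the in-memory sessions, used when a roller re-registers.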
func (m *Manager) reloadRollerAssignedTasks(pubkey string) *cmap.ConcurrentMap {
m.mu.RLock()
defer m.mu.RUnlock()
taskIDs := cmap.New()
for id, sess := range m.sessions {
for _, proverTask := range sess.proverTasks {
if proverTask.ProverPublicKey == pubkey && proverTask.ProvingStatus == int16(types.RollerAssigned) {
taskIDs.Set(id, struct{}{})
}
}
}
return &taskIDs
}
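// register adds the roller to the pool, creating its per-roller metrics on first registration, and returns the channel on which tasks will be delivered to it.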
func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *message.TaskMsg, error) {
node, ok := m.rollerPool.Get(pubkey)
if !ok {
taskIDs := m.reloadRollerAssignedTasks(pubkey)
rMs := &rollerMetrics{
rollerProofsVerifiedSuccessTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/success/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsVerifiedFailedTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/failed/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsGeneratedFailedTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/generated/failed/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsLastAssignedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/assigned/timestamp/%s", pubkey), metrics.ScrollRegistry),
rollerProofsLastFinishedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", pubkey), metrics.ScrollRegistry),
}
node = &rollerNode{
Name: identity.Name,
Type: identity.RollerType,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
metrics: rMs,
}
m.rollerPool.Set(pubkey, node)
}
roller := node.(*rollerNode)
// avoid reconnecting too frequently.
if time.Since(roller.registerTime) < 60*time.Second {
log.Warn("roller reconnect too frequently", "prover_name", identity.Name, "roller_type", identity.RollerType, "public key", pubkey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time and status
roller.registerTime = time.Now()
return roller.taskChan, nil
}
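// freeRoller removes the roller with the given public key from the pool.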
func (m *Manager) freeRoller(pk string) {
m.rollerPool.Pop(pk)
}
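// existTaskIDForRoller reports whether the given task ID is currently assigned to the roller.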
func (m *Manager) existTaskIDForRoller(pk string, id string) bool {
if node, ok := m.rollerPool.Get(pk); ok {
r := node.(*rollerNode)
return r.TaskIDs.Has(id)
}
return false
}
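// freeTaskIDForRoller removes the given task ID from the roller's assigned set.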
func (m *Manager) freeTaskIDForRoller(pk string, id string) {
if node, ok := m.rollerPool.Get(pk); ok {
r := node.(*rollerNode)
r.TaskIDs.Pop(id)
}
}
// GetNumberOfIdleRollers returns the count of idle rollers of the given proof type.
func (m *Manager) GetNumberOfIdleRollers(rollerType message.ProofType) (count int) {
for _, pk := range m.rollerPool.Keys() {
if val, ok := m.rollerPool.Get(pk); ok {
r := val.(*rollerNode)
if r.TaskIDs.Count() == 0 && r.Type == rollerType {
count++
}
}
}
return count
}
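// selectRoller randomly picks an idle roller of the requested proof type, or returns nil if none is available.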
func (m *Manager) selectRoller(rollerType message.ProofType) *rollerNode {
pubkeys := m.rollerPool.Keys()
for len(pubkeys) > 0 {
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
if val, ok := m.rollerPool.Get(pubkeys[idx.Int64()]); ok {
r := val.(*rollerNode)
if r.TaskIDs.Count() == 0 && r.Type == rollerType {
return r
}
}
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
}
return nil
}

View File

@@ -1,24 +1,19 @@
package coordinator_test
package test
import (
"compress/flate"
"context"
"crypto/ecdsa"
"crypto/rand"
"encoding/json"
"fmt"
"math/big"
"net/http"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
@@ -26,17 +21,18 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/coordinator"
client2 "scroll-tech/coordinator/client"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/verifier"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/coordinator/client"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/orm"
)
var (
@@ -65,6 +61,34 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
func setupCoordinator(t *testing.T, rollersPerSession uint8, wsURL string, resetDB bool) (*http.Server, *cron.Collector) {
var err error
db, err = database.InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
if resetDB {
assert.NoError(t, migrate.ResetDB(sqlDB))
}
conf := config.Config{
RollerManagerConfig: &config.RollerManagerConfig{
RollersPerSession: rollersPerSession,
Verifier: &config.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 5,
MaxVerifierWorkers: 10,
SessionAttempts: 2,
},
}
proofCollector := cron.NewCollector(context.Background(), db, &conf)
tmpAPI := api.RegisterAPIs(&conf, db)
handler, _, err := utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], tmpAPI, flate.NoCompression)
assert.NoError(t, err)
rollermanager.InitRollerManager(db)
return handler, proofCollector
}
func setEnv(t *testing.T) {
base = docker.NewDockerApp()
base.RunDBImage(t)
@@ -87,13 +111,13 @@ func setEnv(t *testing.T) {
chunkOrm = orm.NewChunk(db)
l2BlockOrm = orm.NewL2Block(db)
templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_02.json")
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock1 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
assert.NoError(t, err)
templateBlockTrace, err = os.ReadFile("../common/testdata/blockTrace_03.json")
templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
@@ -114,10 +138,9 @@ func TestApis(t *testing.T) {
t.Run("TestValidProof", testValidProof)
t.Run("TestInvalidProof", testInvalidProof)
t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
t.Run("TestTimedoutProof", testTimedoutProof)
t.Run("TestTimeoutProof", testTimeoutProof)
t.Run("TestIdleRollerSelection", testIdleRollerSelection)
t.Run("TestGracefulRestart", testGracefulRestart)
t.Run("TestListRollers", testListRollers)
// Teardown
t.Cleanup(func() {
@@ -128,10 +151,10 @@ func TestApis(t *testing.T) {
func testHandshake(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, proofCollector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
proofCollector.Stop()
}()
roller1 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeChunk)
@@ -140,17 +163,17 @@ func testHandshake(t *testing.T) {
roller2 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeBatch)
defer roller2.close()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
}
func testFailedHandshake(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, proofCollector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
proofCollector.Stop()
}()
// prepare
@@ -160,7 +183,7 @@ func testFailedHandshake(t *testing.T) {
// Try to perform handshake without token
// create a new ws connection
client, err := client2.DialContext(ctx, wsURL)
c, err := client.DialContext(ctx, wsURL)
assert.NoError(t, err)
// create private key
privkey, err := crypto.GenerateKey()
@@ -172,12 +195,12 @@ func testFailedHandshake(t *testing.T) {
},
}
assert.NoError(t, authMsg.SignWithKey(privkey))
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
_, err = c.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)
// Try to perform handshake with an expired token
// create a new ws connection
client, err = client2.DialContext(ctx, wsURL)
c, err = client.DialContext(ctx, wsURL)
assert.NoError(t, err)
// create private key
privkey, err = crypto.GenerateKey()
@@ -189,26 +212,25 @@ func testFailedHandshake(t *testing.T) {
},
}
assert.NoError(t, authMsg.SignWithKey(privkey))
token, err := client.RequestToken(ctx, authMsg)
token, err := c.RequestToken(ctx, authMsg)
assert.NoError(t, err)
authMsg.Identity.Token = token
assert.NoError(t, authMsg.SignWithKey(privkey))
<-time.After(6 * time.Second)
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
_, err = c.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)
assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 0, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
}
func testSeveralConnections(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, proofCollector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
proofCollector.Stop()
}()
var (
@@ -227,8 +249,8 @@ func testSeveralConnections(t *testing.T) {
assert.NoError(t, eg.Wait())
// check roller's idle connections
assert.Equal(t, batch/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, batch/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, batch/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, batch/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
// close connection
for _, roller := range rollers {
@@ -237,12 +259,12 @@ func testSeveralConnections(t *testing.T) {
var (
tick = time.Tick(time.Second)
tickStop = time.Tick(time.Second * 15)
tickStop = time.Tick(time.Minute)
)
for {
select {
case <-tick:
if rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
return
}
case <-tickStop:
@@ -253,12 +275,11 @@ func testSeveralConnections(t *testing.T) {
}
func testValidProof(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
handler, collector := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
}()
// create mock rollers.
@@ -286,8 +307,8 @@ func testValidProof(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -301,7 +322,7 @@ func testValidProof(t *testing.T) {
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
tickStop = time.Tick(time.Minute)
)
for {
select {
@@ -323,10 +344,10 @@ func testValidProof(t *testing.T) {
func testInvalidProof(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
handler, collector := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
}()
// create mock rollers.
@@ -347,8 +368,8 @@ func testInvalidProof(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -356,13 +377,13 @@ func testInvalidProof(t *testing.T) {
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
tickStop = time.Tick(time.Minute)
)
for {
select {
@@ -384,10 +405,10 @@ func testInvalidProof(t *testing.T) {
func testProofGeneratedFailed(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
handler, collector := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
}()
// create mock rollers.
@@ -408,8 +429,8 @@ func testProofGeneratedFailed(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -417,13 +438,13 @@ func testProofGeneratedFailed(t *testing.T) {
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
tickStop = time.Tick(time.Minute)
)
for {
select {
@@ -442,13 +463,13 @@ func testProofGeneratedFailed(t *testing.T) {
}
}
func testTimedoutProof(t *testing.T) {
func testTimeoutProof(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, collector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
}()
// create first chunk & batch mock roller, that will not send any proof.
@@ -459,8 +480,8 @@ func testTimedoutProof(t *testing.T) {
chunkRoller1.close()
batchRoller1.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -468,7 +489,7 @@ func testTimedoutProof(t *testing.T) {
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// verify proof status, it should be assigned, because roller didn't send any proof
@@ -495,8 +516,8 @@ func testTimedoutProof(t *testing.T) {
chunkRoller2.close()
batchRoller2.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
// verify proof status, it should be verified now, because second roller sent valid proof
ok = utils.TryTimes(200, func() bool {
@@ -516,10 +537,10 @@ func testTimedoutProof(t *testing.T) {
func testIdleRollerSelection(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, collector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
}()
// create mock rollers.
@@ -541,8 +562,8 @@ func testIdleRollerSelection(t *testing.T) {
}
}()
assert.Equal(t, len(rollers)/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, len(rollers)/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, len(rollers)/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, len(rollers)/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -578,7 +599,7 @@ func testIdleRollerSelection(t *testing.T) {
func testGracefulRestart(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, collector := setupCoordinator(t, 1, wsURL, true)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -602,25 +623,21 @@ func testGracefulRestart(t *testing.T) {
chunkRoller.close()
batchRoller.close()
info, err := rollerManager.GetSessionInfo(dbChunk.Hash)
provingStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
assert.Equal(t, types.ProvingTaskAssigned, provingStatus)
// Close rollerManager and ws handler.
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
// Setup new coordinator and ws server.
newRollerManager, newHandler := setupCoordinator(t, 1, wsURL, false)
newHandler, newCollector := setupCoordinator(t, 1, wsURL, false)
defer func() {
newHandler.Shutdown(context.Background())
newRollerManager.Stop()
newCollector.Stop()
}()
info, err = newRollerManager.GetSessionInfo(dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
// at this point, roller haven't submitted
status, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
@@ -660,219 +677,3 @@ func testGracefulRestart(t *testing.T) {
}
}
}
func testListRollers(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
var names = []string{
"roller_test_1",
"roller_test_2",
"roller_test_3",
"roller_test_4",
}
roller1 := newMockRoller(t, names[0], wsURL, message.ProofTypeChunk)
roller2 := newMockRoller(t, names[1], wsURL, message.ProofTypeBatch)
roller3 := newMockRoller(t, names[2], wsURL, message.ProofTypeChunk)
roller4 := newMockRoller(t, names[3], wsURL, message.ProofTypeBatch)
defer func() {
roller1.close()
roller2.close()
}()
// test ListRollers API
rollers, err := rollerManager.ListRollers()
assert.NoError(t, err)
var rollersName []string
for _, roller := range rollers {
rollersName = append(rollersName, roller.Name)
}
sort.Strings(rollersName)
assert.True(t, reflect.DeepEqual(names, rollersName))
// test ListRollers if two rollers closed.
roller3.close()
roller4.close()
// wait coordinator free completely
time.Sleep(time.Second * 5)
rollers, err = rollerManager.ListRollers()
assert.NoError(t, err)
var newRollersName []string
for _, roller := range rollers {
newRollersName = append(newRollersName, roller.Name)
}
sort.Strings(newRollersName)
assert.True(t, reflect.DeepEqual(names[:2], newRollersName))
}
func setupCoordinator(t *testing.T, rollersPerSession uint8, wsURL string, resetDB bool) (rollerManager *coordinator.Manager, handler *http.Server) {
db, err := database.InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
if resetDB {
assert.NoError(t, migrate.ResetDB(sqlDB))
}
rollerManager, err = coordinator.New(context.Background(), &config.RollerManagerConfig{
RollersPerSession: rollersPerSession,
Verifier: &config.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 5,
MaxVerifierWorkers: 10,
SessionAttempts: 2,
}, db)
assert.NoError(t, err)
assert.NoError(t, rollerManager.Start())
// start ws service
handler, _, err = utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs(), flate.NoCompression)
assert.NoError(t, err)
return rollerManager, handler
}
type mockRoller struct {
rollerName string
privKey *ecdsa.PrivateKey
proofType message.ProofType
wsURL string
client *client2.Client
taskCh chan *message.TaskMsg
taskCache sync.Map
sub ethereum.Subscription
stopCh chan struct{}
}
func newMockRoller(t *testing.T, rollerName string, wsURL string, proofType message.ProofType) *mockRoller {
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
roller := &mockRoller{
rollerName: rollerName,
privKey: privKey,
proofType: proofType,
wsURL: wsURL,
taskCh: make(chan *message.TaskMsg, 4),
stopCh: make(chan struct{}),
}
roller.client, roller.sub, err = roller.connectToCoordinator()
assert.NoError(t, err)
return roller
}
// connectToCoordinator sets up a websocket client to connect to the roller manager.
func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscription, error) {
// Create connection.
client, err := client2.Dial(r.wsURL)
if err != nil {
return nil, nil, err
}
// create a new ws connection
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.rollerName,
RollerType: r.proofType,
},
}
_ = authMsg.SignWithKey(r.privKey)
token, err := client.RequestToken(context.Background(), authMsg)
if err != nil {
return nil, nil, err
}
authMsg.Identity.Token = token
_ = authMsg.SignWithKey(r.privKey)
sub, err := client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
if err != nil {
return nil, nil, err
}
return client, sub, nil
}
func (r *mockRoller) releaseTasks() {
r.taskCache.Range(func(key, value any) bool {
r.taskCh <- value.(*message.TaskMsg)
r.taskCache.Delete(key)
return true
})
}
type proofStatus uint32
const (
verifiedSuccess proofStatus = iota
verifiedFailed
generatedFailed
)
// waitTaskAndSendProof waits for a proof task; after receiving it, the roller submits a proof after proofTime.
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, proofStatus proofStatus) {
// simulating the case that the roller first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
if reconnect {
var err error
r.client, r.sub, err = r.connectToCoordinator()
if err != nil {
t.Fatal(err)
return
}
}
// Release cached tasks.
r.releaseTasks()
r.stopCh = make(chan struct{})
go r.loop(t, r.client, proofTime, proofStatus, r.stopCh)
}
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, proofStatus proofStatus, stopCh chan struct{}) {
for {
select {
case task := <-r.taskCh:
r.taskCache.Store(task.ID, task)
// simulate proof time
select {
case <-time.After(proofTime):
case <-stopCh:
return
}
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: task.ID,
Type: r.proofType,
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
if proofStatus == generatedFailed {
proof.Status = message.StatusProofError
} else if proofStatus == verifiedFailed {
proof.ProofDetail.Proof.Proof = []byte(verifier.InvalidTestProof)
}
assert.NoError(t, proof.Sign(r.privKey))
assert.NoError(t, client.SubmitProof(context.Background(), proof))
case <-stopCh:
return
}
}
}
func (r *mockRoller) close() {
close(r.stopCh)
r.sub.Unsubscribe()
}

View File

@@ -0,0 +1,156 @@
package test
import (
"context"
"crypto/ecdsa"
"sync"
"testing"
"time"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types/message"
client2 "scroll-tech/coordinator/client"
"scroll-tech/coordinator/internal/logic/verifier"
)
type proofStatus uint32
const (
verifiedSuccess proofStatus = iota
verifiedFailed
generatedFailed
)
type mockRoller struct {
rollerName string
privKey *ecdsa.PrivateKey
proofType message.ProofType
wsURL string
client *client2.Client
taskCh chan *message.TaskMsg
taskCache sync.Map
sub ethereum.Subscription
stopCh chan struct{}
}
func newMockRoller(t *testing.T, rollerName string, wsURL string, proofType message.ProofType) *mockRoller {
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
roller := &mockRoller{
rollerName: rollerName,
privKey: privKey,
proofType: proofType,
wsURL: wsURL,
taskCh: make(chan *message.TaskMsg, 4),
stopCh: make(chan struct{}),
}
roller.client, roller.sub, err = roller.connectToCoordinator()
assert.NoError(t, err)
return roller
}
// connectToCoordinator sets up a websocket client to connect to the roller manager.
func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscription, error) {
// Create connection.
client, err := client2.Dial(r.wsURL)
if err != nil {
return nil, nil, err
}
// create a new ws connection
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.rollerName,
RollerType: r.proofType,
},
}
_ = authMsg.SignWithKey(r.privKey)
token, err := client.RequestToken(context.Background(), authMsg)
if err != nil {
return nil, nil, err
}
authMsg.Identity.Token = token
_ = authMsg.SignWithKey(r.privKey)
sub, err := client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
if err != nil {
return nil, nil, err
}
return client, sub, nil
}
func (r *mockRoller) releaseTasks() {
r.taskCache.Range(func(key, value any) bool {
r.taskCh <- value.(*message.TaskMsg)
r.taskCache.Delete(key)
return true
})
}
// waitTaskAndSendProof waits for a proof task; after receiving it, the roller submits a proof after proofTime.
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, proofStatus proofStatus) {
// simulating the case that the roller first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
if reconnect {
var err error
r.client, r.sub, err = r.connectToCoordinator()
if err != nil {
t.Fatal(err)
return
}
}
// Release cached tasks.
r.releaseTasks()
r.stopCh = make(chan struct{})
go r.loop(t, r.client, proofTime, proofStatus, r.stopCh)
}
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, proofStatus proofStatus, stopCh chan struct{}) {
for {
select {
case task := <-r.taskCh:
r.taskCache.Store(task.ID, task)
// simulate proof time
select {
case <-time.After(proofTime):
case <-stopCh:
return
}
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: task.ID,
Type: r.proofType,
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
if proofStatus == generatedFailed {
proof.Status = message.StatusProofError
} else if proofStatus == verifiedFailed {
proof.ProofDetail.Proof.Proof = []byte(verifier.InvalidTestProof)
}
assert.NoError(t, proof.Sign(r.privKey))
assert.NoError(t, client.SubmitProof(context.Background(), proof))
case <-stopCh:
return
}
}
}
func (r *mockRoller) close() {
close(r.stopCh)
r.sub.Unsubscribe()
}

coordinator/testdata/blockTrace_02.json vendored Normal file
View File

@@ -0,0 +1,545 @@
{
"withdrawTrieRoot": "0x0000000000000000000000000000000000000000",
"coinbase": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"header": {
"parentHash": "0xe17f08d25ef61a8ee12aa29704b901345a597f5e45a9a0f603ae0f70845b54dc",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"transactionsRoot": "0x3057754c197f33e1fe799e996db6232b5257412feea05b3c1754738f0b33fe32",
"receiptsRoot": "0xd95b673818fa493deec414e01e610d97ee287c9421c8eff4102b1647c1a184e4",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x2",
"number": "0x2",
"gasLimit": "0x355418d1e8184",
"gasUsed": "0xa410",
"timestamp": "0x63807b2a",
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000004b54a94f0df14333e63c8a13dfe6097c1a08b5fd2c225a8dc0f199dae245aead55d6f774a980a0c925be407748d56a14106afda7ddc1dec342e7ee3b0d58a8df01",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x1de9",
"hash": "0xc7b6c7022c8386cdaf6fcd3d4f8d03dce257ae3664a072fdce511ecefce73ad0"
},
"transactions": [
{
"type": 0,
"nonce": 0,
"txHash": "0xb2febc1213baec968f6575789108e175273b8da8f412468098893084229f1542",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514",
"s": "0x34cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b"
},
{
"type": 0,
"nonce": 1,
"txHash": "0xe6ac2ffc543d07f1e280912a2abe3aa659bf83773740681151297ada1bb211dd",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xf039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316",
"s": "0x5a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1"
}
],
"storageTrace": {
"rootBefore": "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
"rootAfter": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"proofs": {
"0x01bae6BF68E9A03Fb2bc0615b1bf0d69ce9411eD": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0xc0c4C8bAEA3f6Acb49b6E1fb9e2ADEcEeaCB0cA2": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
]
}
},
"executionResults": [
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
},
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
}
],
"mptwitness": [
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
},
{
"pathPart": "0x3",
"root": "0xaf16fd780a8c7616b95b20da69f4ff26e0253238e996f9516445d6d6bf92b725",
"path": [
{
"value": "0x5bbe97e7e66485b203f9dfea64eb7fa7df06959b12cbde2beba14f8f91133a13",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
},
{
"value": "0x2e591357b02ab3117c35ad94a4e1a724fdbd95d6463da1f6c8017e6d000ecf02",
"sibling": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
{
"value": "0x794953bb5d8aa00f90383ff435ce2ea58e30e1da1061e69455c38496766ec10f",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
}
]
}

12877
coordinator/testdata/blockTrace_03.json vendored Normal file

File diff suppressed because one or more lines are too long

View File

@@ -4,7 +4,7 @@ go 1.19
require (
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/lib/pq v1.10.9
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/stretchr/testify v1.8.3
@@ -14,11 +14,11 @@ require (
require (
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect

View File

@@ -4,8 +4,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
@@ -20,13 +20,13 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=

View File

@@ -16,8 +16,9 @@ create table prover_task
-- status
proving_status SMALLINT NOT NULL DEFAULT 0,
failure_type SMALLINT NOT NULL DEFAULT 0,
reward DECIMAL(78, 0) NOT NULL DEFAULT 0,
reward DECIMAL(78, 0) NOT NULL DEFAULT 0,
proof BYTEA DEFAULT NULL,
assigned_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-- metadata
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,

View File

@@ -12,8 +12,12 @@ github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 h1:qoVeMsc9/fh/yhxVaA0obYjVH/oI/ihrOoMwsLS9KSA=
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.2/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM=
github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 h1:E+m3SkZCN0Bf5q7YdTs5lSm2CYY3CK4spn5OmUIiQtk=
github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
github.com/Azure/azure-storage-blob-go v0.7.0 h1:MuueVOYkufCxJw5YZzF842DY2MBsp+hLuh2apKY0mck=
@@ -41,11 +45,14 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0=
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
github.com/ClickHouse/ch-go v0.55.0/go.mod h1:kQT2f+yp2p+sagQA/7kS6G3ukym+GQ5KAu1kuFAFDiU=
github.com/ClickHouse/clickhouse-go/v2 v2.2.0 h1:dj00TDKY+xwuTJdbpspCSmTLFyWzRJerTHwaBxut1C0=
github.com/ClickHouse/clickhouse-go/v2 v2.2.0/go.mod h1:8f2XZUi7XoeU+uPIytSi1cvx8fmJxi7vIgqpvYTF1+o=
github.com/ClickHouse/clickhouse-go/v2 v2.9.1/go.mod h1:teXfZNM90iQ99Jnuht+dxQXCuhDZ8nvvMoTJOFrcmcg=
github.com/CloudyKit/jet/v3 v3.0.0 h1:1PwO5w5VCtlUUl+KTOBsTGZlhjWkcybsGaAau52tOy8=
github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
@@ -111,6 +118,7 @@ github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk
github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0=
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
@@ -141,6 +149,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 h1:6IrxszG5G+O7zhtkWxq6+unVvnrm1fqV2Pe+T95DUzw=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c h1:/ovYnF02fwL0kvspmy9AuyKg1JhdTRUgPw4nUxd9oZM=
github.com/dave/jennifer v1.2.0 h1:S15ZkFMRoJ36mGAQgWL1tnr0NQJh9rZ8qatseX/VbBc=
github.com/dchest/blake512 v1.0.0 h1:oDFEQFIqFSeuA34xLtXZ/rWxCXdSjirjzPhey5EUvmA=
@@ -172,6 +181,7 @@ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M=
github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v23.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
@@ -184,7 +194,12 @@ github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0Nkk
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
github.com/elastic/go-sysinfo v1.8.1/go.mod h1:JfllUnzoQV/JRYymbH3dO1yggI3mV2oTKSXsDHM+uIM=
github.com/elastic/go-sysinfo v1.10.1/go.mod h1:QElTrQ6akcnAVCRwdkZtoAkwuTv8UVM4+qe0hPxT4NU=
github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU=
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00=
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
@@ -218,6 +233,8 @@ github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy
github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=
github.com/go-chi/chi/v5 v5.0.0 h1:DBPx88FjZJH3FsICfDAfIfnb7XxKIYVGG6lOPlhENAg=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5RsjvDFLoS5zkNBzIQMuVKUYQDmxU3N5XE=
github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
@@ -245,6 +262,8 @@ github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0
github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA=
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
@@ -255,11 +274,13 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219 h1:utua3L2IbQJmauC5IXdEA547bcoU5dozgQAfc8Onsg4=
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38 h1:y0Wmhvml7cGnzPa9nocn/fMraMH/lMDdeG+rkx4VgYY=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64=
@@ -271,6 +292,7 @@ github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
@@ -278,10 +300,12 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gotestyourself/gotestyourself v1.4.0 h1:CDSlSIuRL/Fsc72Ln5lMybtrCvSRDddsHsDRG/nP7Rg=
github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=
@@ -297,6 +321,7 @@ github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6Pb
github.com/iden3/go-iden3-crypto v0.0.14/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/influxdata/flux v0.65.1 h1:77BcVUCzvN5HMm8+j9PRBQ4iZcu98Dl4Y9rf+J5vhnc=
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385 h1:ED4e5Cc3z5vSN2Tz2GkOHN7vs4Sxe2yds6CXvDnvZFE=
@@ -324,11 +349,19 @@ github.com/jackc/pgx/v4 v4.17.0 h1:Hsx+baY8/zU2WtPLQyZi8WbecgcsWEeyoK1jvg/WgIo=
github.com/jackc/pgx/v4 v4.17.0/go.mod h1:Gd6RmOhtFLTu8cp/Fhq4kP195KrshxYJH3oW8AWJ1pw=
github.com/jackc/puddle/v2 v2.2.0 h1:RdcDk92EJBuBS55nQMMYFXTxwstHug4jkhT5pq8VxPk=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89 h1:12K8AlpT0/6QUXSfV0yi4Q0jkbq8NDtIKFtF61AoqV0=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI=
@@ -350,8 +383,11 @@ github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHz
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE=
github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTjegPdCiOxVOJtRIy7/8RldvMTsyPYH10=
@@ -376,14 +412,18 @@ github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2 h1:JAEbJn3j/FrhdWA9jW8
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U=
github.com/mattn/goveralls v0.0.2 h1:7eJB6EqsPhRVxvwEXGnqdO2sJI0PTsrWoTMXEk9/OQc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/microsoft/go-mssqldb v0.21.0/go.mod h1:+4wZTUnz/SV6nffv+RRRB/ss8jPng5Sho2SmM1l2ts4=
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/mmcloughlin/profile v0.1.1 h1:jhDmAqPyebOsVDOCICJoINoLb/AnLBaUw58nFzxWS2w=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=
@@ -406,11 +446,15 @@ github.com/onsi/ginkgo/v2 v2.8.1 h1:xFTEVwOFa1D/Ty24Ws1npBWkDYEV9BqZrsDxVrVkrrU=
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY=
github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM=
github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg=
github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs=
github.com/paulmach/orb v0.7.1 h1:Zha++Z5OX/l168sqHK3k4z18LDvr+YAO/VjK0ReQ9rU=
github.com/paulmach/orb v0.7.1/go.mod h1:FWRlTgl88VI1RBx/MkrwWDRhQ96ctqMCh8boXhmqB/A=
github.com/paulmach/orb v0.9.2/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
@@ -418,19 +462,28 @@ github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5 h1:tFwafIEMf0B7NlcxV/zJ6leBIa81D3hgGSgsE5hCkOQ=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/pressly/goose/v3 v3.11.2 h1:QgTP45FhBBHdmf7hWKlbWFHtwPtxo0phSDkwDKGUrYs=
github.com/pressly/goose/v3 v3.11.2/go.mod h1:LWQzSc4vwfHA/3B8getTp8g3J5Z8tFBxgxinmGlMlJk=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 h1:cZC+usqsYgHtlBaGulVnZ1hfKAi8iWtujBnRLQE698c=
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=
github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U=
@@ -444,6 +497,7 @@ github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04/go.mod
github.com/scroll-tech/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/zktrie v0.5.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
@@ -470,6 +524,7 @@ github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
@@ -479,36 +534,51 @@ github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:s
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo=
github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc=
github.com/valyala/fasthttp v1.40.0 h1:CRq/00MfruPGFLTQKY8b+8SfdK60TxNztjRMnH0t1Yc=
github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
github.com/vertica/vertica-sql-go v1.3.2/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4=
github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6 h1:YdYsPAZ2pC6Tow/nPZOPQ96O3hm/ToAkGsPLzedXERk=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=
github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg=
go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M=
go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw=
go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo=
go.opentelemetry.io/otel v1.15.1/go.mod h1:mHHGEHVDLal6YrKMmk9LqC4a3sF5g+fHfrttQIB1NTc=
go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc=
go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo=
go.opentelemetry.io/otel/trace v1.15.1/go.mod h1:IWdQG/5N1x7f6YUlmdLeJvH9yxtuJAfc4VW5Agv9r/8=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
@@ -519,25 +589,31 @@ golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220224120231-95c6836cb0e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
@@ -549,9 +625,12 @@ golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
gonum.org/v1/gonum v0.6.0 h1:DJy6UzXbahnGUf1ujUNkh/NEtK14qMo2nvlBPs4U5yw=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b h1:Qh4dB5D/WpoUUp3lSod7qgoyEHbDGPUWjIbnqdqqe1k=
@@ -564,7 +643,9 @@ google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 h1:R1r5J0u6Cx+RNl/
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@@ -573,22 +654,34 @@ gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2G
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v1.4.0 h1:BjtEgfuw8Qyd+jPvQz8CfoxiO/UjFEidWinwEXZiWv0=
gotest.tools v1.4.0/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.36.1/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws=
modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
modernc.org/sqlite v1.22.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk=
modernc.org/strutil v1.1.2/go.mod h1:OYajnUAcI/MX+XD/Wx7v1bbdvcQSvxgtb0gC+u3d3eg=
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE=
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=

View File

@@ -5,16 +5,16 @@ import (
"math/big"
"testing"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/database/migrate"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/prover-stats-api/internal/config"
"scroll-tech/prover-stats-api/internal/orm"
)

View File

@@ -25,6 +25,7 @@ type ProverTask struct {
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
Reward decimal.Decimal `json:"reward" gorm:"column:reward;default:0;type:decimal(78)"`
Proof []byte `json:"proof" gorm:"column:proof;default:NULL"`
AssignedAt time.Time `json:"assigned_at" gorm:"assigned_at"`
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at"`
@@ -84,12 +85,16 @@ func (o *ProverTask) GetProverTasksByHash(ctx context.Context, hash string) (*Pr
}
// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask) error {
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
db := o.db.WithContext(ctx)
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
DoUpdates: clause.AssignmentColumns([]string{"proving_status"}),
DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
})
if err := db.Create(&proverTask).Error; err != nil {

View File

@@ -7,6 +7,12 @@ import (
"io"
"math/big"
"net/http"
"testing"
"github.com/gin-gonic/gin"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/database/migrate"
@@ -14,19 +20,11 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/types"
"github.com/gin-gonic/gin"
"testing"
"scroll-tech/prover-stats-api/internal/config"
"scroll-tech/prover-stats-api/internal/controller"
"scroll-tech/prover-stats-api/internal/orm"
"scroll-tech/prover-stats-api/internal/route"
api_types "scroll-tech/prover-stats-api/internal/types"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
apitypes "scroll-tech/prover-stats-api/internal/types"
)
var (
@@ -113,10 +111,10 @@ func getResp(t *testing.T, url string) interface{} {
byt, err := io.ReadAll(resp.Body)
assert.NoError(t, err)
res := new(api_types.Response)
res := new(apitypes.Response)
assert.NoError(t, json.Unmarshal(byt, res))
t.Log("----byt is ", string(byt))
assert.Equal(t, api_types.Success, res.ErrCode)
assert.Equal(t, apitypes.Success, res.ErrCode)
return res.Data
}

View File

@@ -33,7 +33,7 @@ var (
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
bridgeApp = bcmd.NewBridgeApp(base, "../../bridge/conf/config.json")
coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/config.json")
coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json")
rollerApp = rapp.NewRollerApp(base, "../../roller/config.json", coordinatorApp.WSEndpoint())
m.Run()
bridgeApp.Free()