Compare commits

...

40 Commits

Author SHA1 Message Date
HAOYUatHZ 117b451bd4 WIP 2023-07-21 18:33:42 +08:00
HAOYUatHZ 6850150e5d WIP 2023-07-21 16:39:24 +08:00
georgehao f4c0b075b0 feat: update 2023-07-21 16:05:45 +08:00
georgehao 477d13212e feat: fix unit test 2023-07-21 16:00:11 +08:00
georgehao 5ebc6ae531 feat: fix get all prover task erorr 2023-07-21 15:48:52 +08:00
georgehao 3caeab8f7d feat: update 2023-07-21 15:33:55 +08:00
georgehao d4c552a751 feat: resolve conflict 2023-07-21 15:32:27 +08:00
georgehao 664af271d2 feat: add reloadRollerAssignedTasks 2023-07-21 15:24:36 +08:00
georgehao 3980399599 feat: remove redundant code 2023-07-21 14:36:19 +08:00
georgehao 92f41b08d7 feat: scrollfmt 2023-07-21 10:41:53 +08:00
georgehao 3d983c3053 feat: resolve conflict 2023-07-21 10:30:32 +08:00
georgehao a1422a7ed6 feat: address comments 2023-07-21 10:28:30 +08:00
georgehao e15d7d8fc3 feat: update 2023-07-18 19:34:36 +08:00
georgehao d0979a821c feat: update 2023-07-18 19:09:21 +08:00
georgehao 4057425746 feat: remove repeat type 2023-07-18 19:03:15 +08:00
georgehao e84f8c359f feat: update 2023-07-18 18:31:32 +08:00
georgehao 090ae5c29c feat: update 2023-07-18 18:27:19 +08:00
georgehao 1af1541a09 feat: address comments 2023-07-18 18:18:37 +08:00
georgehao cd2b758b97 Merge branch 'develop' into feat/refactor-coordinator 2023-07-17 11:28:32 +08:00
georgehao ee05ff1be5 feat: fix coordinator TestRoller_SubmitProof test failure 2023-07-17 11:23:21 +08:00
georgehao 9979cec2a2 feat: fix integration test failure 2023-07-17 10:58:18 +08:00
georgehao 6b149ce3e4 feat: update 2023-07-16 23:17:51 +08:00
georgehao a2ca7ce9a1 feat: update 2023-07-16 22:57:50 +08:00
georgehao 38f48744c2 feat: update 2023-07-16 22:54:34 +08:00
georgehao 5849862967 feat: recover config 2023-07-16 22:50:17 +08:00
georgehao 904453cb87 feat: fix coordinator unit test failure 2023-07-16 22:42:31 +08:00
georgehao 2b1de651b3 feat: update 2023-07-16 22:36:33 +08:00
georgehao 503a856ced feat: fix lint check 2023-07-16 22:32:06 +08:00
georgehao ed67808672 feat: fmt 2023-07-16 22:20:06 +08:00
georgehao 9d243960cf feat: fix testTimeoutProof unit test 2023-07-16 22:10:42 +08:00
georgehao 1026d4883d feat: fix testInvalidProof test failure 2023-07-16 16:29:03 +08:00
georgehao 43266d2885 feat: fix TestValidProof test failure 2023-07-16 15:38:09 +08:00
georgehao 453c645bea feat: add start successful log 2023-07-14 18:13:14 +08:00
georgehao 0eaea3985e feat: remove unused code 2023-07-14 18:07:15 +08:00
georgehao e68dabb035 feat: sync 2023-07-14 17:17:40 +08:00
georgehao 6df380f15e feat: fix conflict of some test 2023-07-14 17:11:46 +08:00
georgehao 181e547c22 feat: resolve conflict 2023-07-14 15:59:34 +08:00
georgehao eb87ad3da0 feat: regulate proof some logic 2023-07-14 15:55:42 +08:00
georgehao deaf766a81 feat: fix coordinator mock_app 2023-07-13 16:50:03 +08:00
georgehao 3b2fa02cd7 feat: resolve conflict 2023-07-13 16:29:28 +08:00
49 changed files with 15622 additions and 1927 deletions

View File

@@ -7,7 +7,7 @@ require (
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
github.com/jmoiron/sqlx v1.3.5
github.com/kataras/iris/v12 v12.2.0
github.com/lib/pq v1.10.7
github.com/lib/pq v1.10.9
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.19
github.com/modern-go/reflect2 v1.0.2
@@ -44,7 +44,7 @@ require (
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v20.10.21+incompatible // indirect
github.com/docker/docker v23.0.6+incompatible // indirect
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 // indirect
github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
github.com/fatih/structs v1.1.0 // indirect
@@ -54,7 +54,7 @@ require (
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/getsentry/sentry-go v0.18.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gobwas/httphead v0.1.0 // indirect
github.com/gobwas/pool v0.2.1 // indirect
@@ -91,7 +91,7 @@ require (
github.com/mailgun/raymond/v2 v2.0.48 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mediocregopher/radix/v3 v3.8.1 // indirect
github.com/microcosm-cc/bluemonday v1.0.23 // indirect

View File

@@ -97,8 +97,8 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU=
github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
@@ -144,8 +144,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@@ -303,8 +303,8 @@ github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awS
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw=
github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
@@ -333,8 +333,8 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=

View File

@@ -75,6 +75,10 @@ linters-settings:
# report about shadowed variables
check-shadowing: true
gosec:
disable:
- G108
golint:
# minimal confidence for issues, default is 0.8
min-confidence: 0.8
@@ -227,7 +231,12 @@ issues:
- lll
source: "^//go:generate "
text: "long-lines"
# Exclude gosec issues for G108: Profiling endpoint is automatically exposed
- linters:
- gosec
text: "G108"
- linters:
- wsl
text: "return statements should not be cuddled if block has more than two lines"

View File

@@ -49,6 +49,10 @@ func InitDB(config *Config) (*gorm.DB, error) {
db, err := gorm.Open(postgres.Open(config.DSN), &gorm.Config{
Logger: &tmpGormLogger,
NowFunc: func() time.Time {
utc, _ := time.LoadLocation("")
return time.Now().In(utc)
},
})
if err != nil {
return nil, err
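A note on the NowFunc added above: per the time package documentation, time.LoadLocation("") returns UTC, so this pins GORM's record timestamps to UTC regardless of the server's local zone. A minimal standalone sketch of the equivalent behavior (hypothetical example, not part of this diff):

// Minimal sketch: time.LoadLocation("") names UTC, so the NowFunc
// above is equivalent to returning time.Now().UTC().
package main

import (
	"fmt"
	"time"
)

func main() {
	utc, err := time.LoadLocation("") // "" and "UTC" both name UTC
	if err != nil {
		panic(err)
	}
	fmt.Println(utc == time.UTC)                            // true
	fmt.Println(time.Now().In(utc).Location() == time.UTC) // true: timestamps pinned to UTC
}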

View File

@@ -7,6 +7,7 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
@@ -65,8 +66,12 @@ func (i *ImgDB) Stop() error {
if i.id == "" {
i.id = GetContainerID(i.name)
}
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, i.id, &timeout); err != nil {
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
return err
}
// remove the stopped container.
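This Stop change, and the matching one in the geth image helper below, track the docker/docker bump from v20.x to v23: client.ContainerStop now takes a container.StopOptions value instead of a *time.Duration, with the timeout expressed in seconds as a *int. A minimal sketch of the new call shape, assuming a connected *client.Client:

// Minimal sketch of the v23+ ContainerStop signature.
package docker

import (
	"context"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func stopContainer(ctx context.Context, cli *client.Client, id string) error {
	// v20.x took a *time.Duration; v23+ takes container.StopOptions,
	// whose Timeout field is in seconds (nil means the daemon default).
	timeoutSec := 3
	return cli.ContainerStop(ctx, id, container.StopOptions{Timeout: &timeoutSec})
}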

View File

@@ -9,6 +9,7 @@ import (
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/scroll-tech/go-ethereum/ethclient"
"scroll-tech/common/cmd"
@@ -135,8 +136,11 @@ func (i *ImgGeth) Stop() error {
// check if container is running, stop the running container.
id := GetContainerID(i.name)
if id != "" {
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, id, timeout); err != nil {
return err
}
i.id = id

View File

@@ -3,9 +3,9 @@ module scroll-tech/common
go 1.19
require (
github.com/docker/docker v20.10.21+incompatible
github.com/docker/docker v23.0.6+incompatible
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/lib/pq v1.10.9
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.19
github.com/modern-go/reflect2 v1.0.2
@@ -18,7 +18,8 @@ require (
)
require (
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -36,7 +37,7 @@ require (
github.com/go-kit/kit v0.9.0 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
@@ -55,23 +56,23 @@ require (
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
github.com/jackc/pgx/v5 v5.3.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/moby/term v0.5.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.27.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pkg/errors v0.9.1 // indirect
@@ -84,7 +85,6 @@ require (
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.5.3 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect

View File

@@ -18,13 +18,13 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
@@ -63,7 +63,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -80,8 +79,8 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMa
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU=
github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -121,8 +120,8 @@ github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
@@ -216,8 +215,9 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0 h1:/NQi8KHMpKWHInxXesC8yD4DhkXPrVhmnwYkjp9AmBA=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/pgx/v5 v5.3.1 h1:Fcr8QJ1ZeLi5zsPZqQeUZhNhxfkkKBOgJuYkJHoBOtU=
github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
@@ -263,8 +263,8 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
@@ -287,8 +287,8 @@ github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWV
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -296,8 +296,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI=
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
@@ -325,8 +325,8 @@ github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754=
github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
@@ -384,8 +384,6 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -563,7 +561,6 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -606,7 +603,6 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -699,7 +695,6 @@ gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@@ -95,18 +95,24 @@ func (s RollerProveStatus) String() string {
}
}
// RollerFailureType is the type of a roller session's failure
type RollerFailureType int
// ProverTaskFailureType is the type of a prover task failure
type ProverTaskFailureType int
const (
// RollerFailureTypeUndefined indicates an unknown roller failure type
RollerFailureTypeUndefined RollerFailureType = iota
// ProverTaskFailureTypeUndefined indicates an unknown prover task failure type
ProverTaskFailureTypeUndefined ProverTaskFailureType = iota
// ProverTaskFailureTypeTimeout indicates a prover task timeout
ProverTaskFailureTypeTimeout
)
func (s RollerFailureType) String() string {
switch s {
func (r ProverTaskFailureType) String() string {
switch r {
case ProverTaskFailureTypeUndefined:
return "prover task failure undefined"
case ProverTaskFailureTypeTimeout:
return "prover task failure timeout"
default:
return fmt.Sprintf("Undefined (%d)", int32(s))
return "illegal prover task failure type"
}
}
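Since the renamed type implements fmt.Stringer, callers get readable failure labels for free; a minimal usage sketch assuming the definitions above:

// Minimal usage sketch for the renamed failure type.
fmt.Println(ProverTaskFailureTypeTimeout.String()) // "prover task failure timeout"
fmt.Println(ProverTaskFailureType(42))             // "illegal prover task failure type" via Stringer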

View File

@@ -19,8 +19,8 @@ test:
libzkp:
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
rm -rf ./verifier/lib && cp -r ../common/libzkp/interface ./verifier/lib
find ../common | grep libzktrie.so | xargs -I{} cp {} ./verifier/lib/
rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
find ../common | grep libzktrie.so | xargs -I{} cp {} ./internal/logic/verifier/lib
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
@@ -29,13 +29,13 @@ mock_coordinator: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd
test-verifier: libzkp
go test -tags ffi -timeout 0 -v ./verifier
go test -tags ffi -timeout 0 -v ./internal/logic/verifier
test-gpu-verifier: libzkp
go test -tags="gpu ffi" -timeout 0 -v ./verifier
go test -tags="gpu ffi" -timeout 0 -v ./internal/logic/verifier
lint: ## Lint the files - used for CI
cp -r ../common/libzkp/interface ./verifier/lib
cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
GOBIN=$(PWD)/build/bin go run ../build/lint.go
clean: ## Empty out the bin folder
@@ -45,4 +45,4 @@ docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/coordinator.Dockerfile
docker_push:
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}

View File

@@ -1,129 +0,0 @@
package coordinator
import (
"context"
"errors"
"fmt"
"github.com/patrickmn/go-cache"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/metrics"
"scroll-tech/common/types/message"
)
var (
coordinatorRollersDisconnectsTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/rollers/disconnects/total", metrics.ScrollRegistry)
)
// RollerAPI for rollers inorder to register and submit proof
type RollerAPI interface {
RequestToken(authMsg *message.AuthMsg) (string, error)
Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error)
SubmitProof(proof *message.ProofMsg) error
}
// RequestToken generates and sends back register token for roller
func (m *Manager) RequestToken(authMsg *message.AuthMsg) (string, error) {
if ok, err := authMsg.Verify(); !ok {
if err != nil {
log.Error("failed to verify auth message", "error", err)
}
return "", errors.New("signature verification failed")
}
pubkey, _ := authMsg.PublicKey()
if token, ok := m.tokenCache.Get(pubkey); ok {
return token.(string), nil
}
token, err := message.GenerateToken()
if err != nil {
return "", errors.New("token generation failed")
}
m.tokenCache.Set(pubkey, token, cache.DefaultExpiration)
return token, nil
}
// Register register api for roller
func (m *Manager) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
// Verify register message.
if ok, err := authMsg.Verify(); !ok {
if err != nil {
log.Error("failed to verify auth message", "error", err)
}
return nil, errors.New("signature verification failed")
}
pubkey, _ := authMsg.PublicKey()
// Lock here to avoid malicious roller message replay before cleanup of token
m.registerMu.Lock()
if ok, err := m.VerifyToken(authMsg); !ok {
m.registerMu.Unlock()
return nil, err
}
// roller successfully registered, remove token associated with this roller
m.tokenCache.Delete(pubkey)
m.registerMu.Unlock()
// create or get the roller message channel
taskCh, err := m.register(pubkey, authMsg.Identity)
if err != nil {
return nil, err
}
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
}
rpcSub := notifier.CreateSubscription()
go func() {
defer func() {
m.freeRoller(pubkey)
log.Info("roller unregister", "name", authMsg.Identity.Name, "pubkey", pubkey)
}()
for {
select {
case task := <-taskCh:
notifier.Notify(rpcSub.ID, task) //nolint
case err := <-rpcSub.Err():
coordinatorRollersDisconnectsTotalCounter.Inc(1)
log.Warn("client stopped the ws connection", "name", authMsg.Identity.Name, "pubkey", pubkey, "err", err)
return
case <-notifier.Closed():
return
}
}
}()
log.Info("roller register", "name", authMsg.Identity.Name, "pubkey", pubkey, "version", authMsg.Identity.Version)
return rpcSub, nil
}
// SubmitProof roller pull proof
func (m *Manager) SubmitProof(proof *message.ProofMsg) error {
// Verify the signature
if ok, err := proof.Verify(); !ok {
if err != nil {
log.Error("failed to verify proof message", "error", err)
}
return errors.New("auth signature verify fail")
}
pubkey, _ := proof.PublicKey()
// Only allow registered pub-key.
if !m.existTaskIDForRoller(pubkey, proof.ID) {
return fmt.Errorf("the roller or session id doesn't exist, pubkey: %s, ID: %s", pubkey, proof.ID)
}
m.updateMetricRollerProofsLastFinishedTimestampGauge(pubkey)
err := m.handleZkProof(pubkey, proof.ProofDetail)
if err != nil {
return err
}
defer m.freeTaskIDForRoller(pubkey, proof.ID)
return nil
}

View File

@@ -1,98 +0,0 @@
package coordinator
import (
"fmt"
"time"
"scroll-tech/common/types"
)
// RollerDebugAPI roller api interface in order go get debug message.
type RollerDebugAPI interface {
// ListRollers returns all live rollers
ListRollers() ([]*RollerInfo, error)
// GetSessionInfo returns the session information given the session id.
GetSessionInfo(sessionID string) (*SessionInfo, error)
}
// RollerInfo records the roller name, pub key and active session info (id, start time).
type RollerInfo struct {
Name string `json:"name"`
Version string `json:"version"`
PublicKey string `json:"public_key"`
ActiveSession string `json:"active_session,omitempty"`
ActiveSessionStartTime time.Time `json:"active_session_start_time"` // latest proof start time.
}
// SessionInfo records proof create or proof verify failed session.
type SessionInfo struct {
ID string `json:"id"`
Status string `json:"status"`
StartTime time.Time `json:"start_time"`
FinishTime time.Time `json:"finish_time,omitempty"` // set to 0 if not finished
AssignedRollers []string `json:"assigned_rollers,omitempty"` // roller name list
Error string `json:"error,omitempty"` // empty string if no error encountered
}
// ListRollers returns all live rollers.
func (m *Manager) ListRollers() ([]*RollerInfo, error) {
m.mu.RLock()
defer m.mu.RUnlock()
var res []*RollerInfo
for _, pk := range m.rollerPool.Keys() {
node, exist := m.rollerPool.Get(pk)
if !exist {
continue
}
roller := node.(*rollerNode)
info := &RollerInfo{
Name: roller.Name,
Version: roller.Version,
PublicKey: pk,
}
for id, sess := range m.sessions {
for _, proverTask := range sess.proverTasks {
if proverTask.ProverPublicKey == pk {
info.ActiveSessionStartTime = proverTask.CreatedAt
info.ActiveSession = id
break
}
}
}
res = append(res, info)
}
return res, nil
}
func newSessionInfo(sess *session, status types.ProvingStatus, errMsg string, finished bool) *SessionInfo {
now := time.Now()
var nameList []string
for _, proverTask := range sess.proverTasks {
nameList = append(nameList, proverTask.ProverName)
}
info := SessionInfo{
ID: sess.taskID,
Status: status.String(),
AssignedRollers: nameList,
StartTime: sess.proverTasks[0].CreatedAt,
Error: errMsg,
}
if finished {
info.FinishTime = now
}
return &info
}
// GetSessionInfo returns the session information given the session id.
func (m *Manager) GetSessionInfo(sessionID string) (*SessionInfo, error) {
m.mu.RLock()
defer m.mu.RUnlock()
if info, ok := m.failedSessionInfos[sessionID]; ok {
return info, nil
}
if s, ok := m.sessions[sessionID]; ok {
return newSessionInfo(s, types.ProvingTaskAssigned, "", false), nil
}
return nil, fmt.Errorf("no such session, sessionID: %s", sessionID)
}

View File

@@ -1,218 +0,0 @@
package coordinator
import (
"context"
"errors"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
)
func geneAuthMsg(t *testing.T) *message.AuthMsg {
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test1",
},
}
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
assert.NoError(t, authMsg.SignWithKey(privKey))
return authMsg
}
var rollerManager *Manager
func init() {
rmConfig := config.RollerManagerConfig{}
rmConfig.Verifier = &config.VerifierConfig{MockMode: true}
rollerManager, _ = New(context.Background(), &rmConfig, nil)
}
func TestManager_RequestToken(t *testing.T) {
convey.Convey("auth msg verify failure", t, func() {
tmpAuthMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test_request_token",
},
}
token, err := rollerManager.RequestToken(tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, token)
})
convey.Convey("token has already been distributed", t, func() {
tmpAuthMsg := geneAuthMsg(t)
key, _ := tmpAuthMsg.PublicKey()
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
rollerManager.tokenCache.Set(key, tokenCacheStored, time.Hour)
token, err := rollerManager.RequestToken(tmpAuthMsg)
assert.NoError(t, err)
assert.Equal(t, token, tokenCacheStored)
})
convey.Convey("token generation failure", t, func() {
tmpAuthMsg := geneAuthMsg(t)
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
return "", errors.New("token generation failed")
})
defer patchGuard.Reset()
token, err := rollerManager.RequestToken(tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, token)
})
convey.Convey("token generation success", t, func() {
tmpAuthMsg := geneAuthMsg(t)
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
return tokenCacheStored, nil
})
defer patchGuard.Reset()
token, err := rollerManager.RequestToken(tmpAuthMsg)
assert.NoError(t, err)
assert.Equal(t, tokenCacheStored, token)
})
}
func TestManager_Register(t *testing.T) {
convey.Convey("auth msg verify failure", t, func() {
tmpAuthMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test_register",
},
}
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("verify token failure", t, func() {
tmpAuthMsg := geneAuthMsg(t)
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return false, errors.New("verify token failure")
})
defer patchGuard.Reset()
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("register failure", t, func() {
tmpAuthMsg := geneAuthMsg(t)
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
patchGuard.ApplyPrivateMethod(rollerManager, "register", func(*Manager, string, *message.Identity) (<-chan *message.TaskMsg, error) {
return nil, errors.New("register error")
})
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("notifier failure", t, func() {
tmpAuthMsg := geneAuthMsg(t)
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
patchGuard.ApplyFunc(rpc.NotifierFromContext, func(ctx context.Context) (*rpc.Notifier, bool) {
return nil, false
})
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Equal(t, err, rpc.ErrNotificationsUnsupported)
assert.Equal(t, *subscription, rpc.Subscription{})
})
}
func TestManager_SubmitProof(t *testing.T) {
id := "10000"
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: id,
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
var rp rollerNode
rp.TaskIDs = cmap.New()
rp.TaskIDs.Set(id, id)
convey.Convey("verify failure", t, func() {
var s *message.ProofMsg
patchGuard := gomonkey.ApplyMethodFunc(s, "Verify", func() (bool, error) {
return false, errors.New("proof verify error")
})
defer patchGuard.Reset()
err := rollerManager.SubmitProof(proof)
assert.Error(t, err)
})
convey.Convey("existTaskIDForRoller failure", t, func() {
var s *cmap.ConcurrentMap
patchGuard := gomonkey.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
return nil, true
})
defer patchGuard.Reset()
var pm *message.ProofMsg
patchGuard.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
return true, nil
})
err := rollerManager.SubmitProof(proof)
assert.Error(t, err)
})
convey.Convey("handleZkProof failure", t, func() {
var pm *message.ProofMsg
patchGuard := gomonkey.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
return true, nil
})
defer patchGuard.Reset()
var s cmap.ConcurrentMap
patchGuard.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
return &rp, true
})
patchGuard.ApplyPrivateMethod(rollerManager, "handleZkProof", func(manager *Manager, pk string, msg *message.ProofDetail) error {
return errors.New("handle zk proof error")
})
err := rollerManager.SubmitProof(proof)
assert.Error(t, err)
})
convey.Convey("SubmitProof success", t, func() {
var pm *message.ProofMsg
patchGuard := gomonkey.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
return true, nil
})
defer patchGuard.Reset()
var s cmap.ConcurrentMap
patchGuard.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
return &rp, true
})
patchGuard.ApplyPrivateMethod(rollerManager, "handleZkProof", func(manager *Manager, pk string, msg *message.ProofDetail) error {
return nil
})
err := rollerManager.SubmitProof(proof)
assert.NoError(t, err)
})
}

View File

@@ -6,6 +6,9 @@ import (
"os"
"os/signal"
// enable the pprof endpoints
_ "net/http/pprof"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -14,8 +17,10 @@ import (
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/logic/rollermanager"
)
var app *cli.App
@@ -37,44 +42,34 @@ func init() {
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// init db handler
subCtx, cancel := context.WithCancel(ctx.Context)
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
proofCollector := cron.NewCollector(subCtx, db, cfg)
rollermanager.InitRollerManager(db)
defer func() {
proofCollector.Stop()
cancel()
if err = database.CloseDB(db); err != nil {
log.Error("can not close ormFactory", "error", err)
log.Error("can not close db connection", "error", err)
}
}()
subCtx, cancel := context.WithCancel(ctx.Context)
// Initialize all coordinator modules.
rollerManager, err := coordinator.New(subCtx, cfg.RollerManagerConfig, db)
defer func() {
cancel()
rollerManager.Stop()
}()
if err != nil {
return err
}
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Start all modules.
if err = rollerManager.Start(); err != nil {
log.Crit("couldn't start roller manager", "error", err)
}
apis := rollerManager.APIs()
apis := api.RegisterAPIs(cfg, db)
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(fmt.Sprintf("%s:%d", ctx.String(httpListenAddrFlag.Name), ctx.Int(httpPortFlag.Name)), apis)
@@ -89,8 +84,7 @@ func action(ctx *cli.Context) error {
}
// Register api and start ws service.
if ctx.Bool(wsEnabledFlag.Name) {
handler, addr, err := utils.StartWSEndpoint(fmt.Sprintf("%s:%d", ctx.String(wsListenAddrFlag.Name), ctx.Int(wsPortFlag.Name)),
apis, cfg.RollerManagerConfig.CompressionLevel)
handler, addr, err := utils.StartWSEndpoint(fmt.Sprintf("%s:%d", ctx.String(wsListenAddrFlag.Name), ctx.Int(wsPortFlag.Name)), apis, cfg.RollerManagerConfig.CompressionLevel)
if err != nil {
log.Crit("Could not start WS api", "error", err)
}

View File

@@ -10,7 +10,6 @@ require (
github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
golang.org/x/exp v0.0.0-20230206171751-46f607a40771
golang.org/x/sync v0.3.0
gorm.io/gorm v1.25.2
)

View File

@@ -109,8 +109,6 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

View File

@@ -0,0 +1,115 @@
package api
import (
"context"
"errors"
"fmt"
"time"
"github.com/patrickmn/go-cache"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/proof"
)
// RollerController is the roller API controller
type RollerController struct {
tokenCache *cache.Cache
proofReceiver *proof.ZKProofReceiver
taskWorker *proof.TaskWorker
}
// NewRollerController creates a roller controller
func NewRollerController(cfg *config.RollerManagerConfig, db *gorm.DB) *RollerController {
return &RollerController{
proofReceiver: proof.NewZKProofReceiver(cfg, db),
taskWorker: proof.NewTaskWorker(),
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
}
}
// RequestToken returns the register token for authMsg
func (r *RollerController) RequestToken(authMsg *message.AuthMsg) (string, error) {
if ok, err := authMsg.Verify(); !ok {
if err != nil {
log.Error("failed to verify auth message", "error", err)
}
return "", errors.New("signature verification failed")
}
pubkey, err := authMsg.PublicKey()
if err != nil {
return "", fmt.Errorf("RequestToken auth msg public key error:%w", err)
}
if token, ok := r.tokenCache.Get(pubkey); ok {
return token.(string), nil
}
token, err := message.GenerateToken()
if err != nil {
return "", errors.New("token generation failed")
}
r.tokenCache.SetDefault(pubkey, token)
return token, nil
}
// verifyToken checks that the cached token for the pubkey matches and has not expired
func (r *RollerController) verifyToken(authMsg *message.AuthMsg) (bool, error) {
pubkey, err := authMsg.PublicKey()
if err != nil {
return false, fmt.Errorf("verify token auth msg public key error:%w", err)
}
// Get returns ok=false if the value has expired
if token, ok := r.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
return false, fmt.Errorf("failed to find corresponding token. roller name: %s roller pk: %s", authMsg.Identity.Name, pubkey)
}
return true, nil
}
// Register is the registration API for rollers
func (r *RollerController) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
// Verify register message.
if ok, err := authMsg.Verify(); !ok {
if err != nil {
log.Error("failed to verify auth message", "error", err)
}
return nil, errors.New("signature verification failed")
}
// Verify the token here to prevent malicious roller message replay before the token is cleaned up
if ok, err := r.verifyToken(authMsg); !ok {
return nil, err
}
pubkey, err := authMsg.PublicKey()
if err != nil {
return nil, fmt.Errorf("register auth msg public key error:%w", err)
}
// roller successfully registered, remove token associated with this roller
r.tokenCache.Delete(pubkey)
rpcSub, err := r.taskWorker.AllocTaskWorker(ctx, authMsg)
if err != nil {
return rpcSub, err
}
return rpcSub, nil
}
// SubmitProof handles a proof submitted by a roller
func (r *RollerController) SubmitProof(proof *message.ProofMsg) error {
// Verify the signature
if ok, err := proof.Verify(); !ok {
if err != nil {
log.Error("failed to verify proof message", "error", err)
}
return errors.New("auth signature verify fail")
}
err := r.proofReceiver.HandleZkProof(context.Background(), proof)
if err != nil {
return err
}
return nil
}
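The token flow above leans on patrickmn/go-cache for expiry: RequestToken caches a token under the roller's pubkey with the configured TTL, verifyToken compares the cached value, and Register deletes the entry on success so the token cannot be replayed. A minimal standalone sketch of that lifecycle (hypothetical values, not taken from this diff):

// Minimal sketch of the register-token lifecycle with go-cache.
package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

func main() {
	// TTL mirrors cfg.TokenTimeToLive seconds; cleanup runs hourly.
	tokens := cache.New(120*time.Second, time.Hour)

	pubkey, token := "0xabc", "c393987bb791dd285dd3d8ffbd770ed1"
	tokens.SetDefault(pubkey, token) // RequestToken: cache with the default TTL

	// verifyToken: a miss (expired) or mismatch rejects the registration.
	if v, ok := tokens.Get(pubkey); !ok || v != token {
		fmt.Println("token rejected")
		return
	}

	tokens.Delete(pubkey) // Register: consume the token to prevent replay
	fmt.Println("registered")
}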

View File

@@ -0,0 +1,303 @@
package api
import (
"context"
"crypto/ecdsa"
"database/sql"
"errors"
"fmt"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/proof"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
func geneAuthMsg(t *testing.T) (*message.AuthMsg, *ecdsa.PrivateKey) {
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test1",
},
}
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
assert.NoError(t, authMsg.SignWithKey(privKey))
return authMsg, privKey
}
var rollerController *RollerController
func init() {
conf := &config.RollerManagerConfig{
TokenTimeToLive: 120,
}
conf.Verifier = &config.VerifierConfig{MockMode: true}
rollerController = NewRollerController(conf, nil)
}
func TestRoller_RequestToken(t *testing.T) {
convey.Convey("auth msg verify failure", t, func() {
tmpAuthMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test_request_token",
},
}
token, err := rollerController.RequestToken(tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, token)
})
convey.Convey("token has already been distributed", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
key, _ := tmpAuthMsg.PublicKey()
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
rollerController.tokenCache.Set(key, tokenCacheStored, time.Hour)
token, err := rollerController.RequestToken(tmpAuthMsg)
assert.NoError(t, err)
assert.Equal(t, token, tokenCacheStored)
})
convey.Convey("token generation failure", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
return "", errors.New("token generation failed")
})
defer patchGuard.Reset()
token, err := rollerController.RequestToken(tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, token)
})
convey.Convey("token generation success", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
return tokenCacheStored, nil
})
defer patchGuard.Reset()
token, err := rollerController.RequestToken(tmpAuthMsg)
assert.NoError(t, err)
assert.Equal(t, tokenCacheStored, token)
})
}
func TestRoller_Register(t *testing.T) {
convey.Convey("auth msg verify failure", t, func() {
tmpAuthMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: "roller_test_register",
},
}
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("verify token failure", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return false, errors.New("verify token failure")
})
defer patchGuard.Reset()
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("notifier failure", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
patchGuard.ApplyFunc(rpc.NotifierFromContext, func(ctx context.Context) (*rpc.Notifier, bool) {
return nil, false
})
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Equal(t, err, rpc.ErrNotificationsUnsupported)
assert.Equal(t, *subscription, rpc.Subscription{})
})
convey.Convey("register failure", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
var taskWorker *proof.TaskWorker
patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
return nil, errors.New("register error")
})
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
assert.Error(t, err)
assert.Empty(t, subscription)
})
convey.Convey("register success", t, func() {
tmpAuthMsg, _ := geneAuthMsg(t)
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
return true, nil
})
defer patchGuard.Reset()
var taskWorker *proof.TaskWorker
patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
return nil, nil
})
_, err := rollerController.Register(context.Background(), tmpAuthMsg)
assert.NoError(t, err)
})
}
func TestRoller_SubmitProof(t *testing.T) {
tmpAuthMsg, prvKey := geneAuthMsg(t)
pubKey, err := tmpAuthMsg.PublicKey()
assert.NoError(t, err)
id := "rollers_info_test"
tmpProof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
Type: message.ProofTypeChunk,
ID: id,
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
assert.NoError(t, tmpProof.Sign(prvKey))
proofPubKey, err := tmpProof.PublicKey()
assert.NoError(t, err)
assert.Equal(t, pubKey, proofPubKey)
var proverTaskOrm *orm.ProverTask
patchGuard := gomonkey.ApplyMethodFunc(proverTaskOrm, "GetProverTasks", func(ctx context.Context, fields map[string]interface{}, orderByList []string, offset, limit int) ([]orm.ProverTask, error) {
return nil, nil
})
defer patchGuard.Reset()
rollermanager.InitRollerManager(nil)
taskChan, err := rollermanager.Manager.Register(context.Background(), pubKey, tmpAuthMsg.Identity)
assert.NotNil(t, taskChan)
assert.NoError(t, err)
convey.Convey("verify failure", t, func() {
var s *message.ProofMsg
patchGuard.ApplyMethodFunc(s, "Verify", func() (bool, error) {
return false, errors.New("proof verify error")
})
err = rollerController.SubmitProof(tmpProof)
assert.Error(t, err)
})
var s *message.ProofMsg
patchGuard.ApplyMethodFunc(s, "Verify", func() (bool, error) {
return true, nil
})
var chunkOrm *orm.Chunk
patchGuard.ApplyMethodFunc(chunkOrm, "UpdateProofByHash", func(context.Context, string, *message.AggProof, uint64, ...*gorm.DB) error {
return nil
})
patchGuard.ApplyMethodFunc(chunkOrm, "UpdateProvingStatus", func(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
return nil
})
var batchOrm *orm.Batch
patchGuard.ApplyMethodFunc(batchOrm, "UpdateProofByHash", func(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
return nil
})
patchGuard.ApplyMethodFunc(batchOrm, "UpdateProvingStatus", func(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
return nil
})
convey.Convey("get none rollers of prover task", t, func() {
patchGuard.ApplyMethodFunc(proverTaskOrm, "GetProverTaskByTaskIDAndPubKey", func(ctx context.Context, hash, pubKey string) (*orm.ProverTask, error) {
return nil, nil
})
tmpProof1 := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: "10001",
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
tmpProof1.Sign(privKey)
_, err1 := tmpProof1.PublicKey()
assert.NoError(t, err1)
err2 := rollerController.SubmitProof(tmpProof1)
fmt.Println(err2)
targetErr := fmt.Errorf("validator failure get none prover task for the proof")
assert.Equal(t, err2.Error(), targetErr.Error())
})
patchGuard.ApplyMethodFunc(proverTaskOrm, "GetProverTaskByTaskIDAndPubKey", func(ctx context.Context, hash, pubKey string) (*orm.ProverTask, error) {
now := time.Now()
s := &orm.ProverTask{
TaskID: id,
ProverPublicKey: proofPubKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: "rollers_info_test",
ProvingStatus: int16(types.RollerAssigned),
CreatedAt: now,
}
return s, nil
})
patchGuard.ApplyMethodFunc(proverTaskOrm, "UpdateProverTaskProvingStatus", func(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus, dbTX ...*gorm.DB) error {
return nil
})
patchGuard.ApplyPrivateMethod(rollerController.proofReceiver, "proofFailure", func(hash string, pubKey string, proofMsgType message.ProofType) {
})
convey.Convey("proof msg status is not ok", t, func() {
tmpProof.Status = message.StatusProofError
err1 := rollerController.SubmitProof(tmpProof)
assert.NoError(t, err1)
})
tmpProof.Status = message.StatusOk
var db *gorm.DB
patchGuard.ApplyMethodFunc(db, "Transaction", func(fc func(tx *gorm.DB) error, opts ...*sql.TxOptions) (err error) {
return nil
})
var tmpVerifier *verifier.Verifier
convey.Convey("verifier proof failure", t, func() {
targetErr := errors.New("verify proof failure")
patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyProof", func(proof *message.AggProof) (bool, error) {
return false, targetErr
})
err1 := rollerController.SubmitProof(tmpProof)
assert.Nil(t, err1)
})
patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyProof", func(proof *message.AggProof) (bool, error) {
return true, nil
})
patchGuard.ApplyPrivateMethod(rollerController.proofReceiver, "closeProofTask", func(hash string, pubKey string, proofMsg *message.ProofMsg, rollersInfo *coordinatorType.RollersInfo) error {
return nil
})
err1 := rollerController.SubmitProof(tmpProof)
assert.Nil(t, err1)
}
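These tests stub out ORM and verifier methods with gomonkey patches instead of a live database. A minimal, self-contained sketch of the pattern used throughout (the Store type is hypothetical; the import path assumes gomonkey v2, and the test binary generally needs -gcflags=all=-l so patched methods are not inlined away):

package example

import (
	"testing"

	"github.com/agiledragon/gomonkey/v2"
	"github.com/stretchr/testify/assert"
)

// Store stands in for an ORM object; only its method set matters here.
type Store struct{}

func (s *Store) Count() (int, error) { return 0, nil }

func TestPatchSketch(t *testing.T) {
	var s *Store // a typed nil receiver is enough to patch the method
	patchGuard := gomonkey.ApplyMethodFunc(s, "Count", func() (int, error) {
		return 42, nil // stubbed behavior
	})
	defer patchGuard.Reset() // restore the original method when the test ends

	n, err := s.Count()
	assert.NoError(t, err)
	assert.Equal(t, 42, n)
}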

View File

@@ -0,0 +1,30 @@
package api
import (
"context"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
)
// RollerAPI is the API rollers use to request tokens, register, and submit proofs
type RollerAPI interface {
RequestToken(authMsg *message.AuthMsg) (string, error)
Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error)
SubmitProof(proof *message.ProofMsg) error
}
// RegisterAPIs registers the coordinator's RPC APIs
func RegisterAPIs(cfg *config.Config, db *gorm.DB) []rpc.API {
return []rpc.API{
{
Namespace: "roller",
Service: RollerAPI(NewRollerController(cfg.RollerManagerConfig, db)),
Public: true,
},
}
}
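For context, a hypothetical sketch of how this API list could be mounted on a go-ethereum RPC server (the startRPC helper is illustrative, not part of this PR):

package api

import (
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/rpc"
	"gorm.io/gorm"

	"scroll-tech/coordinator/internal/config"
)

// startRPC registers every API on a fresh RPC server; the server would then
// be served over WebSocket so rollers can hold a subscription open.
func startRPC(cfg *config.Config, db *gorm.DB) *rpc.Server {
	srv := rpc.NewServer()
	for _, api := range RegisterAPIs(cfg, db) {
		if err := srv.RegisterName(api.Namespace, api.Service); err != nil {
			log.Crit("failed to register RPC namespace", "namespace", api.Namespace, "err", err)
		}
	}
	return srv
}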

View File

@@ -0,0 +1,167 @@
package cron
import (
"context"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/collector"
"scroll-tech/coordinator/internal/orm"
)
// Collector collects the chunk or batch tasks and sends them to provers
type Collector struct {
cfg *config.Config
db *gorm.DB
ctx context.Context
stopRunChan chan struct{}
stopTimeoutChan chan struct{}
collectors map[message.ProofType]collector.Collector
proverTaskOrm *orm.ProverTask
chunkOrm *orm.Chunk
batchOrm *orm.Batch
}
// NewCollector creates a collector that periodically gathers tasks to send to provers
func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config) *Collector {
c := &Collector{
cfg: cfg,
db: db,
ctx: ctx,
stopRunChan: make(chan struct{}),
stopTimeoutChan: make(chan struct{}),
collectors: make(map[message.ProofType]collector.Collector),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
}
c.collectors[message.ProofTypeBatch] = collector.NewBatchProofCollector(cfg, db)
c.collectors[message.ProofTypeChunk] = collector.NewChunkProofCollector(cfg, db)
go c.run()
go c.timeoutProofTask()
log.Info("Start coordinator successfully.")
return c
}
// Stop stops all the collector loops
func (c *Collector) Stop() {
c.stopRunChan <- struct{}{}
c.stopTimeoutChan <- struct{}{}
}
// run loops on a ticker and triggers every collector
func (c *Collector) run() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("collector panic error:%v", err)
log.Warn(nerr.Error())
}
}()
ticker := time.NewTicker(time.Second * 2)
defer ticker.Stop()
for {
select {
case <-ticker.C:
for _, tmpCollector := range c.collectors {
if err := tmpCollector.Collect(c.ctx); err != nil {
log.Warn("collect data to prover failure", "collector name", tmpCollector.Name(), "error", err)
}
}
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopRunChan:
log.Info("the coordinator run loop exit")
return
}
}
}
// timeoutProofTask periodically checks whether assigned tasks have timed out. if a timeout is
// reached, it restores the chunk/batch task to unassigned so the batch/chunk collector can retry it.
func (c *Collector) timeoutProofTask() {
defer func() {
if err := recover(); err != nil {
nerr := fmt.Errorf("timeout proof task panic error:%v", err)
log.Warn(nerr.Error())
}
}()
ticker := time.NewTicker(time.Second * 2)
defer ticker.Stop()
for {
select {
case <-ticker.C:
assignedProverTasks, err := c.proverTaskOrm.GetAssignedProverTasks(c.ctx, 10)
if err != nil {
log.Error("get unassigned session info failure", "error", err)
break
}
for _, assignedProverTask := range assignedProverTasks {
timeoutDuration := time.Duration(c.cfg.RollerManagerConfig.CollectionTime) * time.Minute
// do not mark the chunk/batch proving status failed here: the collector loop
// checks the attempt count, and once the maximum is reached it sets the
// final proving status.
if time.Since(assignedProverTask.CreatedAt) >= timeoutDuration {
log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
err = c.db.Transaction(func(tx *gorm.DB) error {
// update prover task proving status as RollerProofInvalid
if err = c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.RollerProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
// update prover task failure type
if err = c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}
// update the task to unassigned, let collector restart it
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
if err = c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
if err = c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
return nil
})
if err != nil {
log.Error("check task proof is timeout failure", "error", err)
}
}
}
case <-c.ctx.Done():
if c.ctx.Err() != nil {
log.Error("manager context canceled with error", "error", c.ctx.Err())
}
return
case <-c.stopTimeoutChan:
log.Info("the coordinator run loop exit")
return
}
}
}
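Both run and timeoutProofTask share the same stoppable-ticker shape; because stopRunChan and stopTimeoutChan are unbuffered, Stop blocks until each loop has actually received the signal. A distilled sketch of the pattern, with illustrative names:

package cron

import "time"

// tickLoop reduces run/timeoutProofTask to their common skeleton: do work on
// every tick until either the context-done channel or the stop channel fires.
func tickLoop(work func(), done <-chan struct{}, stop <-chan struct{}) {
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop() // release ticker resources when the loop exits
	for {
		select {
		case <-ticker.C:
			work()
		case <-done:
			return
		case <-stop:
			return
		}
	}
}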

View File

@@ -0,0 +1,108 @@
package collector
import (
"context"
"fmt"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// BatchProofCollector is the collector implementation for batch proofs
type BatchProofCollector struct {
BaseCollector
}
// NewBatchProofCollector creates a batch proof collector
func NewBatchProofCollector(cfg *config.Config, db *gorm.DB) *BatchProofCollector {
bp := &BatchProofCollector{
BaseCollector: BaseCollector{
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
},
}
return bp
}
// Name returns the batch proof collector's name
func (ac *BatchProofCollector) Name() string {
return BatchCollectorName
}
// Collect loads and sends batch tasks
func (ac *BatchProofCollector) Collect(ctx context.Context) error {
batchTasks, err := ac.batchOrm.GetUnassignedBatches(ctx, 1)
if err != nil {
return fmt.Errorf("failed to get unassigned batch proving tasks, error:%w", err)
}
if len(batchTasks) == 0 {
return nil
}
if len(batchTasks) != 1 {
return fmt.Errorf("get unassigned batch proving task len not 1, batch tasks:%v", batchTasks)
}
batchTask := batchTasks[0]
log.Info("start batch proof generation session", "id", batchTask.Hash)
if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch) == 0 {
return fmt.Errorf("no idle common roller when starting proof generation session, id:%s", batchTask.Hash)
}
if !ac.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
return fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
}
rollerStatusList, err := ac.sendTask(ctx, batchTask.Hash)
if err != nil {
return fmt.Errorf("send batch task id:%s err:%w", batchTask.Hash, err)
}
transErr := ac.db.Transaction(func(tx *gorm.DB) error {
// Update session proving status as assigned.
if err = ac.batchOrm.UpdateProvingStatus(ctx, batchTask.Hash, types.ProvingTaskAssigned, tx); err != nil {
return fmt.Errorf("failed to update task status, id:%s, error:%w", batchTask.Hash, err)
}
for _, rollerStatus := range rollerStatusList {
proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: rollerStatus.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: rollerStatus.Name,
ProvingStatus: int16(types.RollerAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
}
// Store session info.
if err = ac.proverTaskOrm.SetProverTask(ctx, &proverTask, tx); err != nil {
return fmt.Errorf("db set session info fail, session id:%s, error:%w", proverTask.TaskID, err)
}
}
return nil
})
return transErr
}
func (ac *BatchProofCollector) sendTask(ctx context.Context, taskID string) ([]*coordinatorType.RollerStatus, error) {
// get chunk proofs from db
chunkProofs, err := ac.chunkOrm.GetProofsByBatchHash(ctx, taskID)
if err != nil {
err = fmt.Errorf("failed to get chunk proofs for batch task id:%s err:%w ", taskID, err)
return nil, err
}
return ac.BaseCollector.sendTask(message.ProofTypeBatch, taskID, nil, chunkProofs)
}

View File

@@ -0,0 +1,114 @@
package collector
import (
"context"
"fmt"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// ChunkProofCollector is the collector implementation for chunk proofs
type ChunkProofCollector struct {
BaseCollector
}
// NewChunkProofCollector creates a chunk proof collector
func NewChunkProofCollector(cfg *config.Config, db *gorm.DB) *ChunkProofCollector {
cp := &ChunkProofCollector{
BaseCollector: BaseCollector{
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
},
}
return cp
}
// Name returns the chunk proof collector's name
func (cp *ChunkProofCollector) Name() string {
return ChunkCollectorName
}
// Collect loads and sends chunk tasks that need proving
func (cp *ChunkProofCollector) Collect(ctx context.Context) error {
// load and send chunk tasks
chunkTasks, err := cp.chunkOrm.GetUnassignedChunks(ctx, 1)
if err != nil {
return fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
}
if len(chunkTasks) == 0 {
return nil
}
if len(chunkTasks) != 1 {
return fmt.Errorf("get unassigned chunk proving task len not 1, chunk tasks:%v", chunkTasks)
}
chunkTask := chunkTasks[0]
log.Info("start chunk generation session", "id", chunkTask.Hash)
if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
return fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
}
if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
return fmt.Errorf("no idle chunk roller when starting proof generation session, id:%s", chunkTask.Hash)
}
rollerStatusList, err := cp.sendTask(ctx, chunkTask.Hash)
if err != nil {
return fmt.Errorf("send task failure, id:%s error:%w", chunkTask.Hash, err)
}
transErr := cp.db.Transaction(func(tx *gorm.DB) error {
// Update session proving status as assigned.
if err = cp.chunkOrm.UpdateProvingStatus(ctx, chunkTask.Hash, types.ProvingTaskAssigned, tx); err != nil {
log.Error("failed to update task status", "id", chunkTask.Hash, "err", err)
return err
}
for _, rollerStatus := range rollerStatusList {
proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: rollerStatus.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: rollerStatus.Name,
ProvingStatus: int16(types.RollerAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
}
if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask, tx); err != nil {
return fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, rollerStatus.PublicKey, err)
}
}
return nil
})
return transErr
}
func (cp *ChunkProofCollector) sendTask(ctx context.Context, hash string) ([]*coordinatorType.RollerStatus, error) {
// Get block hashes.
wrappedBlocks, err := cp.blockOrm.GetL2BlocksByChunkHash(ctx, hash)
if err != nil {
return nil, fmt.Errorf("failed to fetch wrapped blocks, batch hash:%s err:%w", hash, err)
}
blockHashes := make([]common.Hash, len(wrappedBlocks))
for i, wrappedBlock := range wrappedBlocks {
blockHashes[i] = wrappedBlock.Header.Hash()
}
return cp.BaseCollector.sendTask(message.ProofTypeChunk, hash, blockHashes, nil)
}

View File

@@ -0,0 +1,125 @@
package collector
import (
"context"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
const (
// BatchCollectorName the name of batch collector
BatchCollectorName = "batch_collector"
// ChunkCollectorName the name of chunk collector
ChunkCollectorName = "chunk_collector"
)
var coordinatorSessionsTimeoutTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)
// Collector is the interface for collectors that send tasks to provers
type Collector interface {
Name() string
Collect(ctx context.Context) error
}
// BaseCollector holds the shared dependencies and helpers used by the collectors
type BaseCollector struct {
cfg *config.Config
ctx context.Context
db *gorm.DB
batchOrm *orm.Batch
chunkOrm *orm.Chunk
blockOrm *orm.L2Block
proverTaskOrm *orm.ProverTask
}
// checkAttemptsExceeded checks the attempt count of a task via its prover task records. it returns
// true while the task may still be attempted, and false once the maximum is reached and the task
// has been marked failed.
func (b *BaseCollector) checkAttemptsExceeded(hash string, taskType message.ProofType) bool {
whereFields := make(map[string]interface{})
whereFields["task_id"] = hash
whereFields["task_type"] = int16(taskType)
proverTasks, err := b.proverTaskOrm.GetProverTasks(b.ctx, whereFields, nil, 0, 0)
if err != nil {
log.Error("get prover task error", "hash id", hash, "error", err)
return true
}
if len(proverTasks) >= int(b.cfg.RollerManagerConfig.SessionAttempts) {
coordinatorSessionsTimeoutTotalCounter.Inc(1)
log.Warn("proof generation prover task %s ended because reach the max attempts", hash)
for _, proverTask := range proverTasks {
if types.ProvingStatus(proverTask.ProvingStatus) == types.ProvingTaskFailed {
rollermanager.Manager.FreeTaskIDForRoller(proverTask.ProverPublicKey, hash)
}
}
transErr := b.db.Transaction(func(tx *gorm.DB) error {
switch message.ProofType(proverTasks[0].TaskType) {
case message.ProofTypeChunk:
if err := b.chunkOrm.UpdateProvingStatus(b.ctx, hash, types.ProvingTaskFailed, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
}
case message.ProofTypeBatch:
if err := b.batchOrm.UpdateProvingStatus(b.ctx, hash, types.ProvingTaskFailed, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
}
}
// update the prover task status to let timeout checker don't check it.
if err := b.proverTaskOrm.UpdateAllProverTaskProvingStatusOfTaskID(b.ctx, message.ProofType(proverTasks[0].TaskType), hash, types.RollerProofInvalid, tx); err != nil {
log.Error("failed to update prover task proving_status as failed", "msg.ID", hash, "error", err)
}
return nil
})
if transErr == nil {
return false
}
}
return true
}
func (b *BaseCollector) sendTask(proveType message.ProofType, hash string, blockHashes []common.Hash, subProofs []*message.AggProof) ([]*coordinatorType.RollerStatus, error) {
sendMsg := &message.TaskMsg{
ID: hash,
Type: proveType,
BlockHashes: blockHashes,
SubProofs: subProofs,
}
var err error
var rollerStatusList []*coordinatorType.RollerStatus
for i := uint8(0); i < b.cfg.RollerManagerConfig.RollersPerSession; i++ {
rollerPubKey, rollerName, sendErr := rollermanager.Manager.SendTask(proveType, sendMsg)
if sendErr != nil {
err = sendErr
continue
}
rollermanager.Manager.UpdateMetricRollerProofsLastAssignedTimestampGauge(rollerPubKey)
rollerStatus := &coordinatorType.RollerStatus{
PublicKey: rollerPubKey,
Name: rollerName,
Status: types.RollerAssigned,
}
rollerStatusList = append(rollerStatusList, rollerStatus)
}
if err != nil {
return nil, err
}
return rollerStatusList, nil
}

View File

@@ -0,0 +1,320 @@
package proof
import (
"context"
"errors"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
)
var (
coordinatorProofsGeneratedFailedTimeTimer = gethMetrics.NewRegisteredTimer("coordinator/proofs/generated/failed/time", metrics.ScrollRegistry)
coordinatorProofsReceivedTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/proofs/received/total", metrics.ScrollRegistry)
coordinatorProofsVerifiedSuccessTimeTimer = gethMetrics.NewRegisteredTimer("coordinator/proofs/verified/success/time", metrics.ScrollRegistry)
coordinatorProofsVerifiedFailedTimeTimer = gethMetrics.NewRegisteredTimer("coordinator/proofs/verified/failed/time", metrics.ScrollRegistry)
coordinatorSessionsFailedTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/sessions/failed/total", metrics.ScrollRegistry)
)
var (
// ErrValidatorFailureProofMsgStatusNotOk proof msg status not ok
ErrValidatorFailureProofMsgStatusNotOk = errors.New("validator failure proof msg status not ok")
// ErrValidatorFailureProverTaskEmpty get none prover task
ErrValidatorFailureProverTaskEmpty = errors.New("validator failure get none prover task for the proof")
// ErrValidatorFailureRollerInfoHasProofValid the prover task already has a valid proof
ErrValidatorFailureRollerInfoHasProofValid = errors.New("validator failure prover task info has proof valid")
)
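These sentinels are meant for errors.Is comparisons, which still match after %w wrapping; an illustrative snippet using the errors and fmt imports above (sturdier than the string comparison used in the submit-proof test earlier):

// illustrative only: callers can detect the specific failure even after wrapping
err := fmt.Errorf("handle proof: %w", ErrValidatorFailureProofMsgStatusNotOk)
if errors.Is(err, ErrValidatorFailureProofMsgStatusNotOk) {
	// react to the specific failure without comparing error strings
}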
// ZKProofReceiver receives and validates proofs submitted by rollers
type ZKProofReceiver struct {
chunkOrm *orm.Chunk
batchOrm *orm.Batch
proverTaskOrm *orm.ProverTask
db *gorm.DB
cfg *config.RollerManagerConfig
verifier *verifier.Verifier
}
// NewZKProofReceiver creates a proof receiver
func NewZKProofReceiver(cfg *config.RollerManagerConfig, db *gorm.DB) *ZKProofReceiver {
vf, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
}
return &ZKProofReceiver{
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
cfg: cfg,
db: db,
verifier: vf,
}
}
// HandleZkProof handles a ZkProof submitted from a roller.
// For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side.
func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.ProofMsg) error {
pk, _ := proofMsg.PublicKey()
rollermanager.Manager.UpdateMetricRollerProofsLastFinishedTimestampGauge(pk)
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, proofMsg.ID, pk)
if proverTask == nil || err != nil {
log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
return ErrValidatorFailureProverTaskEmpty
}
if err = m.validator(proverTask, pk, proofMsg); err != nil {
if errors.Is(err, ErrValidatorFailureProofMsgStatusNotOk) {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
}
return nil
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
// update proverTask's proof. will also update status to `ProvingTaskProved`.
err = m.proverTaskOrm.UpdateProverTaskProof(ctx, proofMsg.Type, proofMsg.ID, pk, proofMsg.Proof)
if err != nil {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
log.Error("failed to store basic proof into db", "error", err)
return err
}
coordinatorProofsReceivedTotalCounter.Inc(1)
success, verifyErr := m.verifier.VerifyProof(proofMsg.Proof)
if verifyErr != nil || !success {
if verifyErr != nil {
// TODO: this is only a temp workaround for testnet, we should return err in real cases
log.Error("failed to verify zk proof", "proof id", proofMsg.ID, "roller pk", pk, "prove type",
proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
}
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
// TODO: Roller needs to be slashed if proof is invalid.
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
rollermanager.Manager.UpdateMetricRollerProofsVerifiedFailedTimeTimer(pk, proofTime)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "roller name", proverTask.ProverName,
"roller pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
return nil
}
if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg); err != nil {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg.Type)
}
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
rollermanager.Manager.UpdateMetricRollerProofsVerifiedSuccessTimeTimer(pk, proofTime)
return nil
}
func (m *ZKProofReceiver) checkAreAllChunkProofsReady(ctx context.Context, chunkHash string) error {
batchHash, err := m.chunkOrm.GetChunkBatchHash(ctx, chunkHash)
if err != nil {
return err
}
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(ctx, batchHash)
if err != nil {
return err
}
if allReady {
err := m.chunkOrm.UpdateChunkProofsStatusByBatchHash(ctx, batchHash, types.ChunkProofsStatusReady)
if err != nil {
return err
}
}
return nil
}
func (m *ZKProofReceiver) validator(proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
// Ensure this roller is eligible to participate in the prover task.
if types.RollerProveStatus(proverTask.ProvingStatus) == types.RollerProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the roller for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn("roller has already submitted valid proof in proof session", "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof id", proofMsg.ProofDetail.ID)
return ErrValidatorFailureRollerInfoHasProofValid
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proof id", proofMsg.ID, "roller name", proverTask.ProverName,
"roller pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)
if proofMsg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
rollermanager.Manager.UpdateMetricRollerProofsGeneratedFailedTimeTimer(pk, proofTime)
log.Info("proof generated by roller failed", "proof id", proofMsg.ID, "roller name", proverTask.ProverName,
"roller pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", proofMsg.Error)
return ErrValidatorFailureProofMsgStatusNotOk
}
return nil
}
func (m *ZKProofReceiver) proofFailure(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskFailed); err != nil {
log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
func (m *ZKProofReceiver) proofRecover(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
}
}
func (m *ZKProofReceiver) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) error {
// mark the prover task verified and update the chunk/batch proof and status
// TODO: need to remove a status
// // store proof content
// switch proofMsg.Type {
// case message.ProofTypeChunk:
// storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
// if dbErr := m.chunkOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.Proof, proofTimeSec, tx); dbErr != nil {
// return fmt.Errorf("failed to store chunk proof into db, err:%w", dbErr)
// }
// if dbErr := m.chunkOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskVerified, tx); dbErr != nil {
// return fmt.Errorf("failed to update chunk task status as proved, error:%w", dbErr)
// }
// return nil
// })
// case message.ProofTypeBatch:
// storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
// if dbErr := m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.Proof, proofTimeSec, tx); dbErr != nil {
// return fmt.Errorf("failed to store batch proof into db, error:%w", dbErr)
// }
// if dbErr := m.batchOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskVerified, tx); dbErr != nil {
// return fmt.Errorf("failed to update batch task status as proved, error:%w", dbErr)
// }
// return nil
// })
// }
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg.Type, types.ProvingTaskVerified); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "pubKey", pubKey, "error", err)
return err
}
rollermanager.Manager.FreeTaskIDForRoller(pubKey, hash)
return nil
}
// updateProofStatus updates the chunk/batch task and the prover task status
func (m *ZKProofReceiver) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsgType message.ProofType, status types.ProvingStatus) error {
// if the prover task failure type is ProverTaskFailureTypeTimeout,
// skip the status update because the proof result arrived too late.
if m.checkIsTimeoutFailure(ctx, hash, proverPublicKey) {
return nil
}
var proverTaskStatus types.RollerProveStatus
switch status {
case types.ProvingTaskFailed, types.ProvingTaskUnassigned:
proverTaskStatus = types.RollerProofInvalid
case types.ProvingTaskVerified:
proverTaskStatus = types.RollerProofValid
}
err := m.db.Transaction(func(tx *gorm.DB) error {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsgType, hash, proverPublicKey, proverTaskStatus); updateErr != nil {
return updateErr
}
// if the task already has a verified proof, a failed submission must not overwrite its proving status
if status == types.ProvingTaskFailed && m.checkIsTaskSuccess(ctx, hash, proofMsgType) {
return nil
}
switch proofMsgType {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
return err
}
case message.ProofTypeBatch:
if err := m.batchOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
return err
}
}
return nil
})
if err != nil {
return err
}
if status == types.ProvingTaskVerified && proofMsgType == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, hash); checkReadyErr != nil {
log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
return checkReadyErr
}
}
return nil
}
func (m *ZKProofReceiver) checkIsTaskSuccess(ctx context.Context, hash string, proofType message.ProofType) bool {
var provingStatus types.ProvingStatus
var err error
switch proofType {
case message.ProofTypeChunk:
provingStatus, err = m.chunkOrm.GetProvingStatusByHash(ctx, hash)
if err != nil {
return false
}
case message.ProofTypeBatch:
provingStatus, err = m.batchOrm.GetProvingStatusByHash(ctx, hash)
if err != nil {
return false
}
}
return provingStatus == types.ProvingTaskVerified
}
func (m *ZKProofReceiver) checkIsTimeoutFailure(ctx context.Context, hash, proverPublicKey string) bool {
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, hash, proverPublicKey)
if err != nil {
return false
}
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
return true
}
return false
}

View File

@@ -0,0 +1,79 @@
package proof
import (
"context"
"fmt"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/metrics"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/logic/rollermanager"
)
var coordinatorRollersDisconnectsTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/rollers/disconnects/total", metrics.ScrollRegistry)
// TaskWorker holds the roller task connection
type TaskWorker struct{}
// NewTaskWorker creates a task worker
func NewTaskWorker() *TaskWorker {
return &TaskWorker{}
}
// AllocTaskWorker allocates a task worker goroutine
func (t *TaskWorker) AllocTaskWorker(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
}
pubKey, err := authMsg.PublicKey()
if err != nil {
return &rpc.Subscription{}, fmt.Errorf("AllocTaskWorker auth msg public key error:%w", err)
}
identity := authMsg.Identity
// create or get the roller message channel
taskCh, err := rollermanager.Manager.Register(ctx, pubKey, identity)
if err != nil {
return &rpc.Subscription{}, err
}
rpcSub := notifier.CreateSubscription()
go t.worker(rpcSub, notifier, pubKey, identity, taskCh)
log.Info("roller register", "name", identity.Name, "pubKey", pubKey, "version", identity.Version)
return rpcSub, nil
}
// TODO worker add metrics
func (t *TaskWorker) worker(rpcSub *rpc.Subscription, notifier *rpc.Notifier, pubKey string, identity *message.Identity, taskCh <-chan *message.TaskMsg) {
defer func() {
if err := recover(); err != nil {
log.Error("task worker subId:%d panic for:%v", err)
}
rollermanager.Manager.FreeRoller(pubKey)
log.Info("roller unregister", "name", identity.Name, "pubKey", pubKey)
}()
for {
select {
case task := <-taskCh:
notifier.Notify(rpcSub.ID, task) //nolint
case err := <-rpcSub.Err():
coordinatorRollersDisconnectsTotalCounter.Inc(1)
log.Warn("client stopped the ws connection", "name", identity.Name, "pubkey", pubKey, "err", err)
return
case <-notifier.Closed():
return
}
}
}
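This worker is the server half of a go-ethereum pub/sub pair. A hypothetical sketch of the roller side (an assumption about how a client would consume the subscription; "register" maps to the Register method in the "roller" namespace per geth's subscription naming convention):

package main

import (
	"context"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/rpc"

	"scroll-tech/common/types/message"
)

// subscribeTasks dials the coordinator's WebSocket endpoint and receives
// task messages pushed by the worker loop above.
func subscribeTasks(ctx context.Context, url string, authMsg *message.AuthMsg) error {
	client, err := rpc.DialContext(ctx, url)
	if err != nil {
		return err
	}
	defer client.Close()

	taskCh := make(chan *message.TaskMsg, 4)
	sub, err := client.Subscribe(ctx, "roller", taskCh, "register", authMsg)
	if err != nil {
		return err
	}
	defer sub.Unsubscribe()

	for {
		select {
		case task := <-taskCh:
			log.Info("received task", "id", task.ID)
		case err := <-sub.Err():
			return err
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}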

View File

@@ -0,0 +1,60 @@
package rollermanager
import (
"time"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
)
type rollerMetrics struct {
rollerProofsVerifiedSuccessTimeTimer gethMetrics.Timer
rollerProofsVerifiedFailedTimeTimer gethMetrics.Timer
rollerProofsGeneratedFailedTimeTimer gethMetrics.Timer
rollerProofsLastAssignedTimestampGauge gethMetrics.Gauge
rollerProofsLastFinishedTimestampGauge gethMetrics.Gauge
}
func (r *rollerManager) UpdateMetricRollerProofsLastFinishedTimestampGauge(pk string) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsLastAssignedTimestampGauge(pk string) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
}
}
}
func (r *rollerManager) UpdateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
}
}
}

View File

@@ -0,0 +1,203 @@
package rollermanager
import (
"context"
"crypto/rand"
"errors"
"fmt"
"math/big"
"sync"
"time"
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/orm"
)
var (
once sync.Once
// Manager the global roller manager
Manager *rollerManager
)
// rollerNode records the state and task channel of a connected roller
type rollerNode struct {
// Roller name
Name string
// Roller type
Type message.ProofType
// Roller public key
PublicKey string
// Roller version
Version string
// task channel
taskChan chan *message.TaskMsg
// task ids which have been delivered to the roller.
TaskIDs cmap.ConcurrentMap
// Time of the most recent registration
registerTime time.Time
metrics *rollerMetrics
}
type rollerManager struct {
rollerPool cmap.ConcurrentMap
proverTaskOrm *orm.ProverTask
}
// InitRollerManager initializes the global roller manager
func InitRollerManager(db *gorm.DB) {
once.Do(func() {
Manager = &rollerManager{
rollerPool: cmap.New(),
proverTaskOrm: orm.NewProverTask(db),
}
})
}
// Register registers a roller by its public key and identity, returning the channel used to deliver tasks
func (r *rollerManager) Register(ctx context.Context, proverPublicKey string, identity *message.Identity) (<-chan *message.TaskMsg, error) {
node, ok := r.rollerPool.Get(proverPublicKey)
if !ok {
taskIDs, err := r.reloadRollerAssignedTasks(ctx, proverPublicKey)
if err != nil {
return nil, fmt.Errorf("register error:%w", err)
}
rMs := &rollerMetrics{
rollerProofsVerifiedSuccessTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/success/time/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsVerifiedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsGeneratedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/generated/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsLastAssignedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/assigned/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
rollerProofsLastFinishedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
}
node = &rollerNode{
Name: identity.Name,
Type: identity.RollerType,
Version: identity.Version,
PublicKey: proverPublicKey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
metrics: rMs,
}
r.rollerPool.Set(proverPublicKey, node)
}
roller := node.(*rollerNode)
// avoid reconnecting too frequently.
if time.Since(roller.registerTime) < 60*time.Second {
log.Warn("roller reconnect too frequently", "prover_name", identity.Name, "roller_type", identity.RollerType, "public key", proverPublicKey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time and status
roller.registerTime = time.Now()
return roller.taskChan, nil
}
func (r *rollerManager) reloadRollerAssignedTasks(ctx context.Context, proverPublicKey string) (*cmap.ConcurrentMap, error) {
var assignedProverTasks []orm.ProverTask
page := 0
limit := 100
for {
page++
whereFields := make(map[string]interface{})
whereFields["proving_status"] = int16(types.RollerAssigned)
orderBy := []string{"id asc"}
offset := (page - 1) * limit
batchAssignedProverTasks, err := r.proverTaskOrm.GetProverTasks(ctx, whereFields, orderBy, offset, limit)
if err != nil {
log.Warn("reloadRollerAssignedTasks get all assigned failure", "error", err)
return nil, fmt.Errorf("reloadRollerAssignedTasks error:%w", err)
}
if len(batchAssignedProverTasks) < limit {
break
}
assignedProverTasks = append(assignedProverTasks, batchAssignedProverTasks...)
}
taskIDs := cmap.New()
for _, assignedProverTask := range assignedProverTasks {
if assignedProverTask.ProverPublicKey == proverPublicKey && assignedProverTask.ProvingStatus == int16(types.RollerAssigned) {
taskIDs.Set(assignedProverTask.TaskID, struct{}{})
}
}
return &taskIDs, nil
}
// SendTask sends a task message to a randomly selected idle roller
func (r *rollerManager) SendTask(rollerType message.ProofType, msg *message.TaskMsg) (string, string, error) {
tmpRoller := r.selectRoller(rollerType)
if tmpRoller == nil {
return "", "", errors.New("selectRoller returns nil")
}
select {
case tmpRoller.taskChan <- msg:
tmpRoller.TaskIDs.Set(msg.ID, struct{}{})
default:
err := fmt.Errorf("roller channel is full, rollerName:%s, publicKey:%s", tmpRoller.Name, tmpRoller.PublicKey)
return "", "", err
}
r.UpdateMetricRollerProofsLastAssignedTimestampGauge(tmpRoller.PublicKey)
return tmpRoller.PublicKey, tmpRoller.Name, nil
}
// ExistTaskIDForRoller checks whether the given task id is assigned to the roller
func (r *rollerManager) ExistTaskIDForRoller(pk string, id string) bool {
node, ok := r.rollerPool.Get(pk)
if !ok {
return false
}
roller := node.(*rollerNode)
return roller.TaskIDs.Has(id)
}
// FreeRoller removes the roller with the given public key from the pool
func (r *rollerManager) FreeRoller(pk string) {
r.rollerPool.Pop(pk)
}
// FreeTaskIDForRoller removes a task id from the given roller
func (r *rollerManager) FreeTaskIDForRoller(pk string, id string) {
if node, ok := r.rollerPool.Get(pk); ok {
roller := node.(*rollerNode)
roller.TaskIDs.Pop(id)
}
}
// GetNumberOfIdleRollers returns the count of idle rollers of the given type.
func (r *rollerManager) GetNumberOfIdleRollers(rollerType message.ProofType) (count int) {
for item := range r.rollerPool.IterBuffered() {
roller := item.Val.(*rollerNode)
if roller.TaskIDs.Count() == 0 && roller.Type == rollerType {
count++
}
}
return count
}
func (r *rollerManager) selectRoller(rollerType message.ProofType) *rollerNode {
pubkeys := r.rollerPool.Keys()
for len(pubkeys) > 0 {
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
if val, ok := r.rollerPool.Get(pubkeys[idx.Int64()]); ok {
rn := val.(*rollerNode)
if rn.TaskIDs.Count() == 0 && rn.Type == rollerType {
return rn
}
}
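// sampling without replacement: copy the head element over the inspected
// index, then drop the head so this key cannot be drawn again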
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
}
return nil
}

View File

@@ -11,8 +11,7 @@ import (
const InvalidTestProof = "this is a invalid proof"
// Verifier represents a mock halo2 verifier.
type Verifier struct {
}
type Verifier struct{}
// NewVerifier Sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {

View File

@@ -1,6 +1,6 @@
//go:build ffi
package verifier_test
package verifier
import (
"encoding/json"
@@ -9,12 +9,11 @@ import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/verifier"
"github.com/stretchr/testify/assert"
)
var (
@@ -30,7 +29,7 @@ func TestFFI(t *testing.T) {
ParamsPath: *paramsPath,
AggVkPath: *aggVkPath,
}
v, err := verifier.NewVerifier(cfg)
v, err := NewVerifier(cfg)
as.NoError(err)
f, err := os.Open(*proofPath)

View File

@@ -215,7 +215,11 @@ func (o *Chunk) UpdateChunkProofsStatusByBatchHash(ctx context.Context, batchHas
}
// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus) error {
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
@@ -228,7 +232,7 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["proved_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
@@ -239,7 +243,11 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
}
// UpdateProofByHash updates the batch proof by hash.
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
@@ -249,7 +257,7 @@ func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *messa
updateFields["proof"] = proofBytes
updateFields["proof_time_sec"] = proofTimeSec
db := o.db.WithContext(ctx)
db = db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)

View File

@@ -245,7 +245,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk) (*Chunk, er
}
// UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus) error {
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
@@ -257,8 +257,11 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db.WithContext(ctx)
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)
@@ -269,7 +272,11 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
}
// UpdateProofByHash updates the chunk proof by hash.
func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
@@ -279,7 +286,7 @@ func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *messa
updateFields["proof"] = proofBytes
updateFields["proof_time_sec"] = proofTimeSec
db := o.db.WithContext(ctx)
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("hash", hash)

View File

@@ -49,6 +49,34 @@ func (*ProverTask) TableName() string {
return "prover_task"
}
// GetProverTasks gets prover tasks filtered by the given fields, with optional ordering, offset, and limit
func (o *ProverTask) GetProverTasks(ctx context.Context, fields map[string]interface{}, orderByList []string, offset, limit int) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
for k, v := range fields {
db = db.Where(k, v)
}
for _, orderBy := range orderByList {
db = db.Order(orderBy)
}
if limit != 0 {
db = db.Limit(limit)
}
if offset != 0 {
db = db.Offset(offset)
}
var proverTasks []ProverTask
if err := db.Find(&proverTasks).Error; err != nil {
return nil, err
}
return proverTasks, nil
}
// GetProverTasksByHashes retrieves the ProverTask records associated with the specified hashes.
// The returned prover task objects are sorted in ascending order by their ids.
func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string) ([]*ProverTask, error) {
@@ -68,9 +96,42 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string
return proverTasks, nil
}
// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask) error {
// GetProverTaskByTaskIDAndPubKey gets a prover task by task id and prover public key
func (o *ProverTask) GetProverTaskByTaskIDAndPubKey(ctx context.Context, taskID, proverPublicKey string) (*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id", taskID).Where("prover_public_key", proverPublicKey)
var proverTask ProverTask
err := db.First(&proverTask).Error
if err != nil {
return nil, fmt.Errorf("ProverTask.GetProverTaskByTaskIDAndPubKey err:%w, taskID:%s, pubukey:%s", err, taskID, proverPublicKey)
}
return &proverTask, nil
}
// GetAssignedProverTasks gets the assigned prover tasks
func (o *ProverTask) GetAssignedProverTasks(ctx context.Context, limit int) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("proving_status", int(types.RollerAssigned))
db = db.Limit(limit)
var proverTasks []ProverTask
err := db.Find(&proverTasks).Error
if err != nil {
return nil, fmt.Errorf("ProverTask.GetAssignedProverTasks error:%w", err)
}
return proverTasks, nil
}
// SetProverTask updates or inserts a ProverTask record.
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
db := o.db.WithContext(ctx)
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
@@ -84,13 +145,81 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask)
}
// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus) error {
db := o.db.WithContext(ctx)
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", proofType, taskID, pk)
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", int(proofType), taskID, pk)
if err := db.Update("proving_status", status).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskProvingStatus error: %w, proof type: %v, taskID: %v, prover public key: %v, status: %v", err, proofType.String(), taskID, pk, status.String())
}
return nil
}
// UpdateProverTaskProof updates the proof and marks the proving_status of a specific ProverTask record as proved.
func (o *ProverTask) UpdateProverTaskProof(ctx context.Context, proofType message.ProofType, taskID string, pk string, proof *message.AggProof) error {
db := o.db
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", int(proofType), taskID, pk)
proofBytes, err := json.Marshal(proof)
if err != nil {
return err
}
if err := db.Update("proof", proofBytes).Update("proving_status", types.ProvingTaskProved).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskProvingStatus error: %w, proof type: %v, taskID: %v, prover public key: %v, status: %v", err, proofType.String(), taskID, pk, status.String())
}
return nil
}
// UpdateAllProverTaskProvingStatusOfTaskID updates the proving_status of all prover tasks for a specific task id.
func (o *ProverTask) UpdateAllProverTaskProvingStatusOfTaskID(ctx context.Context, proofType message.ProofType, taskID string, status types.RollerProveStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ?", int(proofType), taskID)
if err := db.Update("proving_status", status).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateAllProverTaskProvingStatusOfTaskID error: %w, proof type: %v, taskID: %v, status: %v", err, proofType.String(), taskID, status.String())
}
return nil
}
// UpdateProverTaskFailureType updates the failure type of a specific prover task
func (o *ProverTask) UpdateProverTaskFailureType(ctx context.Context, proofType message.ProofType, taskID string, pk string, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id", taskID).Where("prover_public_key", pk).Where("task_type", int(proofType))
if err := db.Update("failure_type", int(failureType)).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskFailureType error: %w, proof type: %v, taskID: %v, prover public key: %v, failure type: %v", err, proofType.String(), taskID, pk, failureType.String())
}
return nil
}
// UpdateAllProverTaskFailureTypeOfTaskID updates the failure type of all prover tasks for a specific task id
func (o *ProverTask) UpdateAllProverTaskFailureTypeOfTaskID(ctx context.Context, proofType message.ProofType, taskID string, failureType types.ProverTaskFailureType, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_id", taskID).Where("task_type", int(proofType))
if err := db.Update("failure_type", int(failureType)).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateAllProverTaskFailureTypeOfTaskID error: %w, proof type: %v, taskID: %v, failure type: %v", err, proofType.String(), taskID, failureType.String())
}
return nil
}
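SetProverTask's upsert relies on GORM's ON CONFLICT clause. A compact sketch of the idea (the DoUpdates column list is an assumption, since the diff above is truncated mid-clause):

package orm

import (
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

// upsertProverTask is a hypothetical distillation of SetProverTask: insert the
// record, or update the listed columns when (task_type, task_id,
// prover_public_key) already exists.
func upsertProverTask(db *gorm.DB, proverTask *ProverTask) error {
	return db.Clauses(clause.OnConflict{
		Columns:   []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
		DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type"}), // assumed columns
	}).Create(proverTask).Error
}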

View File

@@ -0,0 +1,21 @@
package types
import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
)
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}
// BatchInfo contains the BlockBatch's main info
type BatchInfo struct {
Index uint64 `json:"index"`
Hash string `json:"hash"`
StateRoot string `json:"state_root"`
}

View File

@@ -0,0 +1,21 @@
package types
import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
// RollersInfo holds the assigned rollers' info for a task (session)
type RollersInfo struct {
ID string `json:"id"`
RollerStatusList []*RollerStatus `json:"rollers"`
StartTimestamp int64 `json:"start_timestamp"`
ProveType message.ProofType `json:"prove_type,omitempty"`
}
// RollerStatus holds a roller's public key, name, and prove status
type RollerStatus struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`
Status types.RollerProveStatus `json:"status"`
}

View File

@@ -1,853 +0,0 @@
package coordinator
import (
"context"
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
cmap "github.com/orcaman/concurrent-map"
"github.com/patrickmn/go-cache"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"golang.org/x/exp/rand"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils/workerpool"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/verifier"
)
var (
// proofs
coordinatorProofsReceivedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/proofs/received/total", metrics.ScrollRegistry)
coordinatorProofsVerifiedSuccessTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/verified/success/time", metrics.ScrollRegistry)
coordinatorProofsVerifiedFailedTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/verified/failed/time", metrics.ScrollRegistry)
coordinatorProofsGeneratedFailedTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/generated/failed/time", metrics.ScrollRegistry)
// sessions
coordinatorSessionsSuccessTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/success/total", metrics.ScrollRegistry)
coordinatorSessionsTimeoutTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)
coordinatorSessionsFailedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/failed/total", metrics.ScrollRegistry)
coordinatorSessionsActiveNumberGauge = geth_metrics.NewRegisteredCounter("coordinator/sessions/active/number", metrics.ScrollRegistry)
)
const (
proofAndPkBufferSize = 10
)
type rollerProofStatus struct {
id string
typ message.ProofType
pk string
status types.RollerProveStatus
}
// Contains all the information on an ongoing proof generation session.
type session struct {
taskID string
proverTasks []*orm.ProverTask
// finish channel is used to pass the public key of the rollers who finished proving process.
finishChan chan rollerProofStatus
}
// Manager is responsible for maintaining connections with active rollers,
// sending the challenges, and receiving proofs. It also regulates the reward
// distribution. All read and write logic and connection handling happens through
// a modular websocket server, contained within the Manager. Incoming messages are
// then passed to the Manager where the actual handling logic resides.
type Manager struct {
// The manager context.
ctx context.Context
// The roller manager configuration.
cfg *config.RollerManagerConfig
// The indicator whether the backend is running or not.
running int32
// A mutex guarding the boolean below.
mu sync.RWMutex
// A map containing all active proof generation sessions.
sessions map[string]*session
// A map recording sessions whose proofs failed to generate or to verify.
rollerPool cmap.ConcurrentMap
failedSessionInfos map[string]*SessionInfo
// A direct connection to the Halo2 verifier, used to verify
// incoming proofs.
verifier *verifier.Verifier
// orm interface
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
proverTaskOrm *orm.ProverTask
// Token cache
tokenCache *cache.Cache
// A mutex guarding registration
registerMu sync.RWMutex
// Verifier worker pool
verifierWorkerPool *workerpool.WorkerPool
}
// New returns a new instance of Manager. The instance will not be fully prepared,
// and still needs to be finalized and run by calling `manager.Start`.
func New(ctx context.Context, cfg *config.RollerManagerConfig, db *gorm.DB) (*Manager, error) {
v, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
return nil, err
}
log.Info("Start coordinator successfully.")
return &Manager{
ctx: ctx,
cfg: cfg,
rollerPool: cmap.New(),
sessions: make(map[string]*session),
failedSessionInfos: make(map[string]*SessionInfo),
verifier: v,
l2BlockOrm: orm.NewL2Block(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
verifierWorkerPool: workerpool.NewWorkerPool(cfg.MaxVerifierWorkers),
}, nil
}
// Start the Manager module.
func (m *Manager) Start() error {
if m.isRunning() {
return nil
}
m.verifierWorkerPool.Run()
m.restorePrevSessions()
atomic.StoreInt32(&m.running, 1)
go m.Loop()
return nil
}
// Stop the Manager module, for a graceful shutdown.
func (m *Manager) Stop() {
if !m.isRunning() {
return
}
m.verifierWorkerPool.Stop()
atomic.StoreInt32(&m.running, 0)
}
// isRunning returns an indicator whether manager is running or not.
func (m *Manager) isRunning() bool {
return atomic.LoadInt32(&m.running) == 1
}
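// A minimal lifecycle sketch (illustrative only): how a caller might wire the
// manager up, assuming ctx, cfg, and db are prepared elsewhere in the repo.
//
//	m, err := New(ctx, cfg, db)
//	if err != nil {
//		log.Crit("failed to create roller manager", "error", err)
//	}
//	if err := m.Start(); err != nil {
//		log.Crit("failed to start roller manager", "error", err)
//	}
//	defer m.Stop()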
// Loop keeps the manager running.
func (m *Manager) Loop() {
var (
tick = time.NewTicker(time.Second * 2)
chunkTasks []*orm.Chunk
batchTasks []*orm.Batch
)
defer tick.Stop()
for {
select {
case <-tick.C:
// load and send batch tasks
if len(batchTasks) == 0 {
var err error
batchTasks, err = m.batchOrm.GetUnassignedBatches(m.ctx, m.GetNumberOfIdleRollers(message.ProofTypeBatch))
if err != nil {
log.Error("failed to get unassigned batch proving tasks", "error", err)
continue
}
}
// Select batch type roller and send message
for len(batchTasks) > 0 && m.StartBatchProofGenerationSession(batchTasks[0], nil) {
batchTasks = batchTasks[1:]
}
// load and send chunk tasks
if len(chunkTasks) == 0 {
// TODO: add cache
var err error
chunkTasks, err = m.chunkOrm.GetUnassignedChunks(m.ctx, m.GetNumberOfIdleRollers(message.ProofTypeChunk))
if err != nil {
log.Error("failed to get unassigned chunk proving tasks", "error", err)
continue
}
}
// Select chunk type roller and send message
for len(chunkTasks) > 0 && m.StartChunkProofGenerationSession(chunkTasks[0], nil) {
chunkTasks = chunkTasks[1:]
}
case <-m.ctx.Done():
if m.ctx.Err() != nil {
log.Error(
"manager context canceled with error",
"error", m.ctx.Err(),
)
}
return
}
}
}
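// The dispatch logic in Loop is a cached work queue: each tick fetches at most
// as many tasks as there are idle rollers, then drains from the front while
// session creation keeps succeeding. A reduced sketch of the same shape, with
// placeholder names:
//
//	for len(tasks) > 0 && startSession(tasks[0]) {
//		tasks = tasks[1:]
//	}
//
// Whatever cannot be assigned yet stays cached for the next tick.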
func (m *Manager) restorePrevSessions() {
m.mu.Lock()
defer m.mu.Unlock()
var hashes []string
// load assigned batch tasks from db
batchTasks, err := m.batchOrm.GetAssignedBatches(m.ctx)
if err != nil {
log.Error("failed to load assigned batch tasks from db", "error", err)
return
}
for _, batchTask := range batchTasks {
hashes = append(hashes, batchTask.Hash)
}
// load assigned chunk tasks from db
chunkTasks, err := m.chunkOrm.GetAssignedChunks(m.ctx)
if err != nil {
log.Error("failed to get assigned batch batchHashes from db", "error", err)
return
}
for _, chunkTask := range chunkTasks {
hashes = append(hashes, chunkTask.Hash)
}
prevSessions, err := m.proverTaskOrm.GetProverTasksByHashes(m.ctx, hashes)
if err != nil {
log.Error("failed to recover roller session info from db", "error", err)
return
}
proverTasksMaps := make(map[string][]*orm.ProverTask)
for _, v := range prevSessions {
log.Info("restore roller info for session", "session start time", v.CreatedAt, "session id", v.TaskID, "roller name",
v.ProverName, "proof type", v.TaskType, "public key", v.ProverPublicKey, "proof status", v.ProvingStatus)
proverTasksMaps[v.TaskID] = append(proverTasksMaps[v.TaskID], v)
}
for taskID, proverTasks := range proverTasksMaps {
sess := &session{
taskID: taskID,
proverTasks: proverTasks,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.sessions[taskID] = sess
go m.CollectProofs(sess)
}
}
// handleZkProof handles a ZkProof submitted from a roller.
// For now only a proving/verifying error will lead to setting status as skipped;
// db/unmarshal errors will not, because they are errors on the business logic side.
func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
var dbErr error
var success bool
// Assess if the proof generation session for the given ID is still active.
// We hold the read lock until the end of the function so that there is no
// potential race for channel deletion.
m.mu.RLock()
defer m.mu.RUnlock()
sess, ok := m.sessions[msg.ID]
if !ok {
return fmt.Errorf("proof generation session for id %v does not existID", msg.ID)
}
var proverTask *orm.ProverTask
for _, si := range sess.proverTasks {
// find the prover task that this proof message belongs to
if si.TaskID == msg.ID && si.ProverPublicKey == pk {
proverTask = si
}
}
if proverTask == nil {
return fmt.Errorf("proof generation session for id %v pk:%s does not existID", msg.ID, pk)
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
// Ensure this roller is eligible to participate in the prover task.
if types.RollerProveStatus(proverTask.ProvingStatus) == types.RollerProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the roller for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn(
"roller has already submitted valid proof in proof session",
"roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey,
"proof type", proverTask.TaskType,
"proof id", msg.ID,
)
return nil
}
log.Info("handling zk proof", "proof id", msg.ID, "roller name", proverTask.ProverName, "roller pk",
proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof time", proofTimeSec)
defer func() {
// TODO: maybe we should use db tx for the whole process?
// Roll back current proof's status.
if dbErr != nil {
if msg.Type == message.ProofTypeChunk {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset chunk task status as Unassigned", "msg.ID", msg.ID)
}
}
if msg.Type == message.ProofTypeBatch {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset batch task status as Unassigned", "msg.ID", msg.ID)
}
}
}
// set proof status
status := types.RollerProofInvalid
if success && dbErr == nil {
status = types.RollerProofValid
}
// notify the session that the roller has finished the proving process
sess.finishChan <- rollerProofStatus{msg.ID, msg.Type, pk, status}
}()
if msg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
m.updateMetricRollerProofsGeneratedFailedTimeTimer(proverTask.ProverPublicKey, proofTime)
log.Info(
"proof generated by roller failed",
"proof id", msg.ID,
"roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey,
"proof type", msg.Type,
"proof time", proofTimeSec,
"error", msg.Error,
)
return nil
}
// store proof content
if msg.Type == message.ProofTypeChunk {
if dbErr = m.chunkOrm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
log.Error("failed to store chunk proof into db", "error", dbErr)
return dbErr
}
if dbErr = m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskProved); dbErr != nil {
log.Error("failed to update chunk task status as proved", "error", dbErr)
return dbErr
}
}
if msg.Type == message.ProofTypeBatch {
if dbErr = m.batchOrm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
log.Error("failed to store batch proof into db", "error", dbErr)
return dbErr
}
if dbErr = m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskProved); dbErr != nil {
log.Error("failed to update batch task status as proved", "error", dbErr)
return dbErr
}
}
coordinatorProofsReceivedTotalCounter.Inc(1)
var verifyErr error
// TODO: wrap both chunk verifier and batch verifier
success, verifyErr = m.verifyProof(msg.Proof)
if verifyErr != nil {
// TODO: this is only a temp workaround for testnet, we should return err in real cases
success = false
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
// TODO: Roller needs to be slashed if proof is invalid.
}
if success {
if msg.Type == message.ProofTypeChunk {
if dbErr = m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update chunk proving_status",
"msg.ID", msg.ID,
"status", types.ProvingTaskVerified,
"error", dbErr)
return dbErr
}
if err := m.checkAreAllChunkProofsReady(msg.ID); err != nil {
log.Error("failed to check are all chunk proofs ready", "error", err)
return err
}
}
if msg.Type == message.ProofTypeBatch {
if dbErr = m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update batch proving_status",
"msg.ID", msg.ID,
"status", types.ProvingTaskVerified,
"error", dbErr)
return dbErr
}
}
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(proverTask.ProverPublicKey, proofTime)
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec)
} else {
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
m.updateMetricRollerProofsVerifiedFailedTimeTimer(proverTask.ProverPublicKey, proofTime)
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", proverTask.ProverName,
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
}
return nil
}
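// handleZkProof leans on the local variables dbErr and success captured by its
// deferred closure: a non-nil dbErr rolls the task back to Unassigned, and
// success decides the status pushed into the session's finishChan. A reduced
// sketch of the pattern, with placeholder names:
//
//	func handle() error {
//		var dbErr error
//		var success bool
//		defer func() {
//			if dbErr != nil {
//				rollback()
//			}
//			notify(success && dbErr == nil)
//		}()
//		success, dbErr = doWork()
//		return dbErr
//	}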
func (m *Manager) checkAreAllChunkProofsReady(chunkHash string) error {
batchHash, err := m.chunkOrm.GetChunkBatchHash(m.ctx, chunkHash)
if err != nil {
return err
}
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(m.ctx, batchHash)
if err != nil {
return err
}
if allReady {
err := m.chunkOrm.UpdateChunkProofsStatusByBatchHash(m.ctx, batchHash, types.ChunkProofsStatusReady)
if err != nil {
return err
}
}
return nil
}
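// Once every chunk proof of the parent batch is verified, the batch's
// chunk-proofs status flips to ChunkProofsStatusReady, which (presumably via
// the unassigned-batch query used in Loop) makes the batch eligible for
// batch-proof dispatch.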
// checkAttemptsExceeded reports whether the number of prover task attempts for the given hash has reached the configured session limit.
func (m *Manager) checkAttemptsExceeded(hash string) bool {
proverTasks, err := m.proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{hash})
if err != nil {
log.Error("get session info error", "hash id", hash, "error", err)
return true
}
if len(proverTasks) >= int(m.cfg.SessionAttempts) {
return true
}
return false
}
// CollectProofs collects proofs corresponding to a proof generation session.
func (m *Manager) CollectProofs(sess *session) {
coordinatorSessionsActiveNumberGauge.Inc(1)
defer coordinatorSessionsActiveNumberGauge.Dec(1)
for {
select {
// Executes after the collection timeout (cfg.CollectionTime, in minutes) elapses; at that point all rollers are considered failed.
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
if !m.checkAttemptsExceeded(sess.taskID) {
var success bool
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeBatch {
success = m.StartBatchProofGenerationSession(nil, sess)
} else if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeChunk {
success = m.StartChunkProofGenerationSession(nil, sess)
}
if success {
m.mu.Lock()
for _, v := range sess.proverTasks {
m.freeTaskIDForRoller(v.ProverPublicKey, v.TaskID)
}
m.mu.Unlock()
log.Info("Retrying session", "session id:", sess.taskID)
return
}
}
// record failed session.
errMsg := "proof generation session ended without receiving any valid proofs"
m.addFailedSession(sess, errMsg)
log.Warn(errMsg, "session id", sess.taskID)
// Set status as skipped.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeChunk {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset chunk task_status as Unassigned", "task id", sess.taskID, "err", err)
}
}
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeBatch {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, sess.taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset batch task_status as Unassigned", "task id", sess.taskID, "err", err)
}
}
m.mu.Lock()
for _, v := range sess.proverTasks {
m.freeTaskIDForRoller(v.ProverPublicKey, v.TaskID)
}
delete(m.sessions, sess.taskID)
m.mu.Unlock()
coordinatorSessionsTimeoutTotalCounter.Inc(1)
return
// Executes after one of the rollers finishes sending its proof; returns early once all rollers have sent results.
case ret := <-sess.finishChan:
m.mu.Lock()
for idx := range sess.proverTasks {
if sess.proverTasks[idx].ProverPublicKey == ret.pk {
sess.proverTasks[idx].ProvingStatus = int16(ret.status)
}
}
if sess.isSessionFailed() {
if ret.typ == message.ProofTypeChunk {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
if ret.typ == message.ProofTypeBatch {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, ret.id, types.ProvingTaskFailed); err != nil {
log.Error("failed to update batch proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
if err := m.proverTaskOrm.UpdateProverTaskProvingStatus(m.ctx, ret.typ, ret.id, ret.pk, ret.status); err != nil {
log.Error("failed to update session info proving status",
"proof type", ret.typ, "task id", ret.id, "pk", ret.pk, "status", ret.status, "error", err)
}
// Check whether all rollers have finished their tasks; rollers with valid results are returned by public key.
finished, validRollers := sess.isRollersFinished()
// Once all rollers have finished submitting, select a winner among those with a valid proof, then return to terminate the loop.
if finished && len(validRollers) > 0 {
// Select a random index for this slice.
randIndex := rand.Int63n(int64(len(validRollers)))
_ = validRollers[randIndex]
// TODO: reward winner
for _, proverTask := range sess.proverTasks {
m.freeTaskIDForRoller(proverTask.ProverPublicKey, proverTask.TaskID)
delete(m.sessions, proverTask.TaskID)
}
m.mu.Unlock()
coordinatorSessionsSuccessTotalCounter.Inc(1)
return
}
m.mu.Unlock()
}
}
}
// isRollersFinished checks whether all rollers have finished submitting proofs and
// records the rollers that produced a valid proof. It returns false while any roller
// is still proving; otherwise it returns true together with the public keys of the
// rollers whose proofs were valid.
func (s *session) isRollersFinished() (bool, []string) {
var validRollers []string
for _, sessionInfo := range s.proverTasks {
if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofValid {
validRollers = append(validRollers, sessionInfo.ProverPublicKey)
continue
}
if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofInvalid {
continue
}
// Some rollers are still proving.
return false, nil
}
return true, validRollers
}
func (s *session) isSessionFailed() bool {
for _, sessionInfo := range s.proverTasks {
if types.RollerProveStatus(sessionInfo.ProvingStatus) != types.RollerProofInvalid {
return false
}
}
return true
}
// APIs collect API services.
func (m *Manager) APIs() []rpc.API {
return []rpc.API{
{
Namespace: "roller",
Service: RollerAPI(m),
Public: true,
},
{
Namespace: "debug",
Public: true,
Service: RollerDebugAPI(m),
},
}
}
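// A minimal serving sketch (illustrative): the API list above is what gets fed
// into the shared websocket helper, mirroring the integration tests. The
// address below is an arbitrary example value.
//
//	handler, _, err := utils.StartWSEndpoint("localhost:8391", m.APIs(), flate.NoCompression)
//	if err != nil {
//		log.Crit("failed to start ws endpoint", "error", err)
//	}
//	defer handler.Shutdown(context.Background())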
// StartChunkProofGenerationSession starts a chunk proof generation session
func (m *Manager) StartChunkProofGenerationSession(task *orm.Chunk, prevSession *session) (success bool) {
var taskID string
if task != nil {
taskID = task.Hash
} else {
taskID = prevSession.taskID
}
if m.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
log.Warn("no idle chunk roller when starting proof generation session", "id", taskID)
return false
}
log.Info("start chunk proof generation session", "id", taskID)
defer func() {
if !success {
if task != nil {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
}
} else {
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
}
}
}
}()
// Get block hashes.
wrappedBlocks, err := m.l2BlockOrm.GetL2BlocksByChunkHash(m.ctx, taskID)
if err != nil {
log.Error(
"Failed to fetch wrapped blocks",
"batch hash", taskID,
"error", err,
)
return false
}
blockHashes := make([]common.Hash, len(wrappedBlocks))
for i, wrappedBlock := range wrappedBlocks {
blockHashes[i] = wrappedBlock.Header.Hash()
}
// Dispatch task to chunk rollers.
var proverTasks []*orm.ProverTask
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.ProofTypeChunk)
if roller == nil {
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", taskID, "name", roller.Name, "public key", roller.PublicKey)
// send the task to the roller
if !roller.sendTask(&message.TaskMsg{ID: taskID, Type: message.ProofTypeChunk, BlockHashes: blockHashes}) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
continue
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
proverTask := orm.ProverTask{
TaskID: taskID,
ProverPublicKey: roller.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: roller.Name,
ProvingStatus: int16(types.RollerAssigned),
FailureType: int16(types.RollerFailureTypeUndefined),
CreatedAt: time.Now(), // Used in proverTasks, should be explicitly assigned here.
}
// Store prover task info.
if err = m.proverTaskOrm.SetProverTask(m.ctx, &proverTask); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}
proverTasks = append(proverTasks, &proverTask)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.ProofTypeChunk, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", proverTask.ProvingStatus)
}
// No roller assigned.
if len(proverTasks) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle chunk rollers", m.GetNumberOfIdleRollers(message.ProofTypeChunk))
return false
}
// Update session proving status as assigned.
if err = m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", taskID, "err", err)
return false
}
// Create a proof generation session.
sess := &session{
taskID: taskID,
proverTasks: proverTasks,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.mu.Lock()
m.sessions[taskID] = sess
m.mu.Unlock()
go m.CollectProofs(sess)
return true
}
// StartBatchProofGenerationSession starts a batch proof generation session.
func (m *Manager) StartBatchProofGenerationSession(task *orm.Batch, prevSession *session) (success bool) {
var taskID string
if task != nil {
taskID = task.Hash
} else {
taskID = prevSession.taskID
}
if m.GetNumberOfIdleRollers(message.ProofTypeBatch) == 0 {
log.Warn("no idle common roller when starting proof generation session", "id", taskID)
return false
}
log.Info("start batch proof generation session", "id", taskID)
defer func() {
if !success {
if task != nil {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
}
} else {
if err := m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
}
}
}
}()
// get chunk proofs from db
chunkProofs, err := m.chunkOrm.GetProofsByBatchHash(m.ctx, taskID)
if err != nil {
log.Error("failed to get chunk proofs for batch task", "session id", taskID, "error", err)
return false
}
// Dispatch task to batch rollers.
var proverTasks []*orm.ProverTask
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller(message.ProofTypeBatch)
if roller == nil {
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", taskID, "name", roller.Name, "type", roller.Type, "public key", roller.PublicKey)
// send the task to the roller
if !roller.sendTask(&message.TaskMsg{
ID: taskID,
Type: message.ProofTypeBatch,
SubProofs: chunkProofs,
}) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
continue
}
proverTask := orm.ProverTask{
TaskID: taskID,
ProverPublicKey: roller.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: roller.Name,
ProvingStatus: int16(types.RollerAssigned),
FailureType: int16(types.RollerFailureTypeUndefined),
CreatedAt: time.Now(), // Used in proverTasks, should be explicitly assigned here.
}
// Store session info.
if err = m.proverTaskOrm.SetProverTask(context.Background(), &proverTask); err != nil {
log.Error("db set session info fail", "session id", taskID, "error", err)
return false
}
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
proverTasks = append(proverTasks, &proverTask)
log.Info("assigned proof to roller", "session id", taskID, "session type", message.ProofTypeBatch, "roller name", roller.Name,
"roller pk", roller.PublicKey, "proof status", proverTask.ProvingStatus)
}
// No roller assigned.
if len(proverTasks) == 0 {
log.Error("no roller assigned", "id", taskID, "number of idle batch rollers", m.GetNumberOfIdleRollers(message.ProofTypeBatch))
return false
}
// Update session proving status as assigned.
if err = m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", taskID, "err", err)
return false
}
// Create a proof generation session.
sess := &session{
taskID: taskID,
proverTasks: proverTasks,
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.mu.Lock()
m.sessions[taskID] = sess
m.mu.Unlock()
go m.CollectProofs(sess)
return true
}
func (m *Manager) addFailedSession(sess *session, errMsg string) {
m.mu.Lock()
defer m.mu.Unlock()
m.failedSessionInfos[sess.taskID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
}
// VerifyToken verifies that the roller's public key has a matching, unexpired token.
func (m *Manager) VerifyToken(authMsg *message.AuthMsg) (bool, error) {
pubkey, _ := authMsg.PublicKey()
// Get reports a cache miss once the value has expired
if token, ok := m.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
return false, fmt.Errorf("failed to find corresponding token. roller name: %s. roller pk: %s", authMsg.Identity.Name, pubkey)
}
return true, nil
}
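// Token flow sketch (illustrative): a token is minted into tokenCache when the
// roller requests one, and VerifyToken only succeeds while that entry is alive.
// Assuming the mint side uses the cache's default TTL (cfg.TokenTimeToLive seconds):
//
//	m.tokenCache.SetDefault(pubkey, token) // entry expires after TokenTimeToLive
//	ok, err := m.VerifyToken(authMsg)      // fails once the entry has expired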
func (m *Manager) addVerifyTask(proof *message.AggProof) chan verifyResult {
c := make(chan verifyResult, 1)
m.verifierWorkerPool.AddTask(func() {
result, err := m.verifier.VerifyProof(proof)
c <- verifyResult{result, err}
})
return c
}
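// Note that the result channel is buffered with capacity 1 so the verifier
// worker can always deliver its result and exit, even if the caller stopped
// waiting; an unbuffered channel here could leak a blocked worker goroutine.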
func (m *Manager) verifyProof(proof *message.AggProof) (bool, error) {
if !m.isRunning() {
return false, errors.New("coordinator has stopped before verification")
}
verifyResultChan := m.addVerifyTask(proof)
result := <-verifyResultChan
return result.result, result.err
}
type verifyResult struct {
result bool
err error
}

View File

@@ -1,60 +0,0 @@
package coordinator
import (
"time"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
)
type rollerMetrics struct {
rollerProofsVerifiedSuccessTimeTimer geth_metrics.Timer
rollerProofsVerifiedFailedTimeTimer geth_metrics.Timer
rollerProofsGeneratedFailedTimeTimer geth_metrics.Timer
rollerProofsLastAssignedTimestampGauge geth_metrics.Gauge
rollerProofsLastFinishedTimestampGauge geth_metrics.Gauge
}
func (m *Manager) updateMetricRollerProofsLastFinishedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (m *Manager) updateMetricRollerProofsLastAssignedTimestampGauge(pk string) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (m *Manager) updateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
}
}
}
func (m *Manager) updateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
}
}
}
func (m *Manager) updateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := m.rollerPool.Get(pk); ok {
rMs := node.(*rollerNode).metrics
if rMs != nil {
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
}
}
}

View File

@@ -1,144 +0,0 @@
package coordinator
import (
"crypto/rand"
"fmt"
"math/big"
"time"
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/log"
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
// rollerNode records roller status and sends tasks to the connected roller.
type rollerNode struct {
// Roller name
Name string
// Roller type
Type message.ProofType
// Roller public key
PublicKey string
// Roller version
Version string
// task channel
taskChan chan *message.TaskMsg
// Session IDs that have been delivered to this roller.
TaskIDs cmap.ConcurrentMap
// Time of the roller's last registration
registerTime time.Time
metrics *rollerMetrics
}
func (r *rollerNode) sendTask(msg *message.TaskMsg) bool {
select {
case r.taskChan <- msg:
r.TaskIDs.Set(msg.ID, struct{}{})
default:
log.Warn("roller channel is full", "roller name", r.Name, "public key", r.PublicKey)
return false
}
return true
}
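// sendTask is deliberately non-blocking: the select/default pair turns a full
// taskChan (buffered with capacity 4 in register) into an immediate "busy"
// signal, so the scheduler can try another roller instead of stalling the
// dispatch loop.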
func (m *Manager) reloadRollerAssignedTasks(pubkey string) *cmap.ConcurrentMap {
m.mu.RLock()
defer m.mu.RUnlock()
taskIDs := cmap.New()
for id, sess := range m.sessions {
for _, proverTask := range sess.proverTasks {
if proverTask.ProverPublicKey == pubkey && proverTask.ProvingStatus == int16(types.RollerAssigned) {
taskIDs.Set(id, struct{}{})
}
}
}
return &taskIDs
}
func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *message.TaskMsg, error) {
node, ok := m.rollerPool.Get(pubkey)
if !ok {
taskIDs := m.reloadRollerAssignedTasks(pubkey)
rMs := &rollerMetrics{
rollerProofsVerifiedSuccessTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/success/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsVerifiedFailedTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/failed/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsGeneratedFailedTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/generated/failed/time/%s", pubkey), metrics.ScrollRegistry),
rollerProofsLastAssignedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/assigned/timestamp/%s", pubkey), metrics.ScrollRegistry),
rollerProofsLastFinishedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", pubkey), metrics.ScrollRegistry),
}
node = &rollerNode{
Name: identity.Name,
Type: identity.RollerType,
Version: identity.Version,
PublicKey: pubkey,
TaskIDs: *taskIDs,
taskChan: make(chan *message.TaskMsg, 4),
metrics: rMs,
}
m.rollerPool.Set(pubkey, node)
}
roller := node.(*rollerNode)
// avoid reconnection too frequently.
if time.Since(roller.registerTime) < 60*time.Second {
log.Warn("roller reconnect too frequently", "prover_name", identity.Name, "roller_type", identity.RollerType, "public key", pubkey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time
roller.registerTime = time.Now()
return roller.taskChan, nil
}
func (m *Manager) freeRoller(pk string) {
m.rollerPool.Pop(pk)
}
func (m *Manager) existTaskIDForRoller(pk string, id string) bool {
if node, ok := m.rollerPool.Get(pk); ok {
r := node.(*rollerNode)
return r.TaskIDs.Has(id)
}
return false
}
func (m *Manager) freeTaskIDForRoller(pk string, id string) {
if node, ok := m.rollerPool.Get(pk); ok {
r := node.(*rollerNode)
r.TaskIDs.Pop(id)
}
}
// GetNumberOfIdleRollers returns the count of idle rollers of the given type.
func (m *Manager) GetNumberOfIdleRollers(rollerType message.ProofType) (count int) {
for _, pk := range m.rollerPool.Keys() {
if val, ok := m.rollerPool.Get(pk); ok {
r := val.(*rollerNode)
if r.TaskIDs.Count() == 0 && r.Type == rollerType {
count++
}
}
}
return count
}
func (m *Manager) selectRoller(rollerType message.ProofType) *rollerNode {
pubkeys := m.rollerPool.Keys()
for len(pubkeys) > 0 {
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
if val, ok := m.rollerPool.Get(pubkeys[idx.Int64()]); ok {
r := val.(*rollerNode)
if r.TaskIDs.Count() == 0 && r.Type == rollerType {
return r
}
}
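// Discard the inspected key: copy the head element over it, then drop the head.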
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
}
return nil
}

View File

@@ -1,24 +1,19 @@
package coordinator_test
package test
import (
"compress/flate"
"context"
"crypto/ecdsa"
"crypto/rand"
"encoding/json"
"fmt"
"math/big"
"net/http"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
@@ -26,17 +21,18 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/coordinator"
client2 "scroll-tech/coordinator/client"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/verifier"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/coordinator/client"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/logic/rollermanager"
"scroll-tech/coordinator/internal/orm"
)
var (
@@ -65,6 +61,34 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
func setupCoordinator(t *testing.T, rollersPerSession uint8, wsURL string, resetDB bool) (*http.Server, *cron.Collector) {
var err error
db, err = database.InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
if resetDB {
assert.NoError(t, migrate.ResetDB(sqlDB))
}
conf := config.Config{
RollerManagerConfig: &config.RollerManagerConfig{
RollersPerSession: rollersPerSession,
Verifier: &config.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 5,
MaxVerifierWorkers: 10,
SessionAttempts: 2,
},
}
proofCollector := cron.NewCollector(context.Background(), db, &conf)
tmpAPI := api.RegisterAPIs(&conf, db)
handler, _, err := utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], tmpAPI, flate.NoCompression)
assert.NoError(t, err)
rollermanager.InitRollerManager(db)
return handler, proofCollector
}
func setEnv(t *testing.T) {
base = docker.NewDockerApp()
base.RunDBImage(t)
@@ -87,13 +111,13 @@ func setEnv(t *testing.T) {
chunkOrm = orm.NewChunk(db)
l2BlockOrm = orm.NewL2Block(db)
templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_02.json")
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock1 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
assert.NoError(t, err)
templateBlockTrace, err = os.ReadFile("../common/testdata/blockTrace_03.json")
templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
@@ -114,10 +138,9 @@ func TestApis(t *testing.T) {
t.Run("TestValidProof", testValidProof)
t.Run("TestInvalidProof", testInvalidProof)
t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
t.Run("TestTimedoutProof", testTimedoutProof)
t.Run("TestTimeoutProof", testTimeoutProof)
t.Run("TestIdleRollerSelection", testIdleRollerSelection)
t.Run("TestGracefulRestart", testGracefulRestart)
t.Run("TestListRollers", testListRollers)
// Teardown
t.Cleanup(func() {
@@ -128,10 +151,10 @@ func TestApis(t *testing.T) {
func testHandshake(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, proofCollector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
proofCollector.Stop()
}()
roller1 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeChunk)
@@ -140,17 +163,17 @@ func testHandshake(t *testing.T) {
roller2 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeBatch)
defer roller2.close()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
}
func testFailedHandshake(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, proofCollector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
proofCollector.Stop()
}()
// prepare
@@ -160,7 +183,7 @@ func testFailedHandshake(t *testing.T) {
// Try to perform handshake without token
// create a new ws connection
client, err := client2.DialContext(ctx, wsURL)
c, err := client.DialContext(ctx, wsURL)
assert.NoError(t, err)
// create private key
privkey, err := crypto.GenerateKey()
@@ -172,12 +195,12 @@ func testFailedHandshake(t *testing.T) {
},
}
assert.NoError(t, authMsg.SignWithKey(privkey))
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
_, err = c.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)
// Try to perform handshake with a timed-out token
// create a new ws connection
client, err = client2.DialContext(ctx, wsURL)
c, err = client.DialContext(ctx, wsURL)
assert.NoError(t, err)
// create private key
privkey, err = crypto.GenerateKey()
@@ -189,26 +212,25 @@ func testFailedHandshake(t *testing.T) {
},
}
assert.NoError(t, authMsg.SignWithKey(privkey))
token, err := client.RequestToken(ctx, authMsg)
token, err := c.RequestToken(ctx, authMsg)
assert.NoError(t, err)
authMsg.Identity.Token = token
assert.NoError(t, authMsg.SignWithKey(privkey))
<-time.After(6 * time.Second)
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
_, err = c.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)
assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 0, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
}
func testSeveralConnections(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, proofCollector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
proofCollector.Stop()
}()
var (
@@ -227,8 +249,8 @@ func testSeveralConnections(t *testing.T) {
assert.NoError(t, eg.Wait())
// check roller's idle connections
assert.Equal(t, batch/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, batch/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, batch/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, batch/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
// close connection
for _, roller := range rollers {
@@ -237,12 +259,12 @@ func testSeveralConnections(t *testing.T) {
var (
tick = time.Tick(time.Second)
tickStop = time.Tick(time.Second * 15)
tickStop = time.Tick(time.Minute)
)
for {
select {
case <-tick:
if rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
return
}
case <-tickStop:
@@ -253,12 +275,11 @@ func testSeveralConnections(t *testing.T) {
}
func testValidProof(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
handler, collector := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
}()
// create mock rollers.
@@ -286,8 +307,8 @@ func testValidProof(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -301,7 +322,7 @@ func testValidProof(t *testing.T) {
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
tickStop = time.Tick(time.Minute)
)
for {
select {
@@ -323,10 +344,10 @@ func testValidProof(t *testing.T) {
func testInvalidProof(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
handler, collector := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
}()
// create mock rollers.
@@ -347,8 +368,8 @@ func testInvalidProof(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -362,7 +383,7 @@ func testInvalidProof(t *testing.T) {
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
tickStop = time.Tick(time.Minute)
)
for {
select {
@@ -384,10 +405,10 @@ func testInvalidProof(t *testing.T) {
func testProofGeneratedFailed(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
handler, collector := setupCoordinator(t, 3, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
}()
// create mock rollers.
@@ -408,8 +429,8 @@ func testProofGeneratedFailed(t *testing.T) {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -423,7 +444,7 @@ func testProofGeneratedFailed(t *testing.T) {
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
tickStop = time.Tick(time.Minute)
)
for {
select {
@@ -442,13 +463,13 @@ func testProofGeneratedFailed(t *testing.T) {
}
}
func testTimedoutProof(t *testing.T) {
func testTimeoutProof(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, collector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
}()
// create first chunk & batch mock roller, that will not send any proof.
@@ -459,8 +480,8 @@ func testTimedoutProof(t *testing.T) {
chunkRoller1.close()
batchRoller1.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -495,8 +516,8 @@ func testTimedoutProof(t *testing.T) {
chunkRoller2.close()
batchRoller2.close()
}()
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
// verify proof status; it should be verified now, because the second roller sent a valid proof
ok = utils.TryTimes(200, func() bool {
@@ -516,10 +537,10 @@ func testTimedoutProof(t *testing.T) {
func testIdleRollerSelection(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, collector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
}()
// create mock rollers.
@@ -541,8 +562,8 @@ func testIdleRollerSelection(t *testing.T) {
}
}()
assert.Equal(t, len(rollers)/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, len(rollers)/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
assert.Equal(t, len(rollers)/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
assert.Equal(t, len(rollers)/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -578,7 +599,7 @@ func testIdleRollerSelection(t *testing.T) {
func testGracefulRestart(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
handler, collector := setupCoordinator(t, 1, wsURL, true)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
@@ -602,25 +623,21 @@ func testGracefulRestart(t *testing.T) {
chunkRoller.close()
batchRoller.close()
info, err := rollerManager.GetSessionInfo(dbChunk.Hash)
provingStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
assert.Equal(t, types.ProvingTaskAssigned, provingStatus)
// Close rollerManager and ws handler.
handler.Shutdown(context.Background())
rollerManager.Stop()
collector.Stop()
// Setup new coordinator and ws server.
newRollerManager, newHandler := setupCoordinator(t, 1, wsURL, false)
newHandler, newCollector := setupCoordinator(t, 1, wsURL, false)
defer func() {
newHandler.Shutdown(context.Background())
newRollerManager.Stop()
newCollector.Stop()
}()
info, err = newRollerManager.GetSessionInfo(dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
// at this point, the roller hasn't submitted yet
status, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
@@ -660,219 +677,3 @@ func testGracefulRestart(t *testing.T) {
}
}
}
func testListRollers(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
var names = []string{
"roller_test_1",
"roller_test_2",
"roller_test_3",
"roller_test_4",
}
roller1 := newMockRoller(t, names[0], wsURL, message.ProofTypeChunk)
roller2 := newMockRoller(t, names[1], wsURL, message.ProofTypeBatch)
roller3 := newMockRoller(t, names[2], wsURL, message.ProofTypeChunk)
roller4 := newMockRoller(t, names[3], wsURL, message.ProofTypeBatch)
defer func() {
roller1.close()
roller2.close()
}()
// test ListRollers API
rollers, err := rollerManager.ListRollers()
assert.NoError(t, err)
var rollersName []string
for _, roller := range rollers {
rollersName = append(rollersName, roller.Name)
}
sort.Strings(rollersName)
assert.True(t, reflect.DeepEqual(names, rollersName))
// test ListRollers if two rollers closed.
roller3.close()
roller4.close()
// wait coordinator free completely
time.Sleep(time.Second * 5)
rollers, err = rollerManager.ListRollers()
assert.NoError(t, err)
var newRollersName []string
for _, roller := range rollers {
newRollersName = append(newRollersName, roller.Name)
}
sort.Strings(newRollersName)
assert.True(t, reflect.DeepEqual(names[:2], newRollersName))
}
func setupCoordinator(t *testing.T, rollersPerSession uint8, wsURL string, resetDB bool) (rollerManager *coordinator.Manager, handler *http.Server) {
db, err := database.InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
if resetDB {
assert.NoError(t, migrate.ResetDB(sqlDB))
}
rollerManager, err = coordinator.New(context.Background(), &config.RollerManagerConfig{
RollersPerSession: rollersPerSession,
Verifier: &config.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 5,
MaxVerifierWorkers: 10,
SessionAttempts: 2,
}, db)
assert.NoError(t, err)
assert.NoError(t, rollerManager.Start())
// start ws service
handler, _, err = utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs(), flate.NoCompression)
assert.NoError(t, err)
return rollerManager, handler
}
type mockRoller struct {
rollerName string
privKey *ecdsa.PrivateKey
proofType message.ProofType
wsURL string
client *client2.Client
taskCh chan *message.TaskMsg
taskCache sync.Map
sub ethereum.Subscription
stopCh chan struct{}
}
func newMockRoller(t *testing.T, rollerName string, wsURL string, proofType message.ProofType) *mockRoller {
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
roller := &mockRoller{
rollerName: rollerName,
privKey: privKey,
proofType: proofType,
wsURL: wsURL,
taskCh: make(chan *message.TaskMsg, 4),
stopCh: make(chan struct{}),
}
roller.client, roller.sub, err = roller.connectToCoordinator()
assert.NoError(t, err)
return roller
}
// connectToCoordinator sets up a websocket client to connect to the roller manager.
func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscription, error) {
// Create connection.
client, err := client2.Dial(r.wsURL)
if err != nil {
return nil, nil, err
}
// create a new ws connection
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.rollerName,
RollerType: r.proofType,
},
}
_ = authMsg.SignWithKey(r.privKey)
token, err := client.RequestToken(context.Background(), authMsg)
if err != nil {
return nil, nil, err
}
authMsg.Identity.Token = token
_ = authMsg.SignWithKey(r.privKey)
sub, err := client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
if err != nil {
return nil, nil, err
}
return client, sub, nil
}
func (r *mockRoller) releaseTasks() {
r.taskCache.Range(func(key, value any) bool {
r.taskCh <- value.(*message.TaskMsg)
r.taskCache.Delete(key)
return true
})
}
type proofStatus uint32
const (
verifiedSuccess proofStatus = iota
verifiedFailed
generatedFailed
)
// Wait for a proof task; after receiving one, the roller submits its proof once proofTime has elapsed.
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, proofStatus proofStatus) {
// simulating the case that the roller first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
if reconnect {
var err error
r.client, r.sub, err = r.connectToCoordinator()
if err != nil {
t.Fatal(err)
return
}
}
// Release cached tasks.
r.releaseTasks()
r.stopCh = make(chan struct{})
go r.loop(t, r.client, proofTime, proofStatus, r.stopCh)
}
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, proofStatus proofStatus, stopCh chan struct{}) {
for {
select {
case task := <-r.taskCh:
r.taskCache.Store(task.ID, task)
// simulate proof time
select {
case <-time.After(proofTime):
case <-stopCh:
return
}
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: task.ID,
Type: r.proofType,
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
if proofStatus == generatedFailed {
proof.Status = message.StatusProofError
} else if proofStatus == verifiedFailed {
proof.ProofDetail.Proof.Proof = []byte(verifier.InvalidTestProof)
}
assert.NoError(t, proof.Sign(r.privKey))
assert.NoError(t, client.SubmitProof(context.Background(), proof))
case <-stopCh:
return
}
}
}
func (r *mockRoller) close() {
close(r.stopCh)
r.sub.Unsubscribe()
}

View File

@@ -0,0 +1,156 @@
package test
import (
"context"
"crypto/ecdsa"
"sync"
"testing"
"time"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"scroll-tech/common/types/message"
client2 "scroll-tech/coordinator/client"
"scroll-tech/coordinator/internal/logic/verifier"
)
type proofStatus uint32
const (
verifiedSuccess proofStatus = iota
verifiedFailed
generatedFailed
)
type mockRoller struct {
rollerName string
privKey *ecdsa.PrivateKey
proofType message.ProofType
wsURL string
client *client2.Client
taskCh chan *message.TaskMsg
taskCache sync.Map
sub ethereum.Subscription
stopCh chan struct{}
}
func newMockRoller(t *testing.T, rollerName string, wsURL string, proofType message.ProofType) *mockRoller {
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
roller := &mockRoller{
rollerName: rollerName,
privKey: privKey,
proofType: proofType,
wsURL: wsURL,
taskCh: make(chan *message.TaskMsg, 4),
stopCh: make(chan struct{}),
}
roller.client, roller.sub, err = roller.connectToCoordinator()
assert.NoError(t, err)
return roller
}
// connectToCoordinator sets up a websocket client to connect to the roller manager.
func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscription, error) {
// Create connection.
client, err := client2.Dial(r.wsURL)
if err != nil {
return nil, nil, err
}
// create a new ws connection
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.rollerName,
RollerType: r.proofType,
},
}
_ = authMsg.SignWithKey(r.privKey)
token, err := client.RequestToken(context.Background(), authMsg)
if err != nil {
return nil, nil, err
}
authMsg.Identity.Token = token
_ = authMsg.SignWithKey(r.privKey)
sub, err := client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
if err != nil {
return nil, nil, err
}
return client, sub, nil
}
func (r *mockRoller) releaseTasks() {
r.taskCache.Range(func(key, value any) bool {
r.taskCh <- value.(*message.TaskMsg)
r.taskCache.Delete(key)
return true
})
}
// Wait for a proof task; after receiving one, the roller submits its proof once proofTime has elapsed.
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, proofStatus proofStatus) {
// simulating the case that the roller first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
if reconnect {
var err error
r.client, r.sub, err = r.connectToCoordinator()
if err != nil {
t.Fatal(err)
return
}
}
// Release cached tasks.
r.releaseTasks()
r.stopCh = make(chan struct{})
go r.loop(t, r.client, proofTime, proofStatus, r.stopCh)
}
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, proofStatus proofStatus, stopCh chan struct{}) {
for {
select {
case task := <-r.taskCh:
r.taskCache.Store(task.ID, task)
// simulate proof time
select {
case <-time.After(proofTime):
case <-stopCh:
return
}
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: task.ID,
Type: r.proofType,
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
if proofStatus == generatedFailed {
proof.Status = message.StatusProofError
} else if proofStatus == verifiedFailed {
proof.ProofDetail.Proof.Proof = []byte(verifier.InvalidTestProof)
}
assert.NoError(t, proof.Sign(r.privKey))
assert.NoError(t, client.SubmitProof(context.Background(), proof))
case <-stopCh:
return
}
}
}
func (r *mockRoller) close() {
close(r.stopCh)
r.sub.Unsubscribe()
}

coordinator/testdata/blockTrace_02.json vendored Normal file
View File

@@ -0,0 +1,545 @@
{
"withdrawTrieRoot": "0x0000000000000000000000000000000000000000",
"coinbase": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"header": {
"parentHash": "0xe17f08d25ef61a8ee12aa29704b901345a597f5e45a9a0f603ae0f70845b54dc",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"transactionsRoot": "0x3057754c197f33e1fe799e996db6232b5257412feea05b3c1754738f0b33fe32",
"receiptsRoot": "0xd95b673818fa493deec414e01e610d97ee287c9421c8eff4102b1647c1a184e4",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x2",
"number": "0x2",
"gasLimit": "0x355418d1e8184",
"gasUsed": "0xa410",
"timestamp": "0x63807b2a",
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000004b54a94f0df14333e63c8a13dfe6097c1a08b5fd2c225a8dc0f199dae245aead55d6f774a980a0c925be407748d56a14106afda7ddc1dec342e7ee3b0d58a8df01",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x1de9",
"hash": "0xc7b6c7022c8386cdaf6fcd3d4f8d03dce257ae3664a072fdce511ecefce73ad0"
},
"transactions": [
{
"type": 0,
"nonce": 0,
"txHash": "0xb2febc1213baec968f6575789108e175273b8da8f412468098893084229f1542",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514",
"s": "0x34cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b"
},
{
"type": 0,
"nonce": 1,
"txHash": "0xe6ac2ffc543d07f1e280912a2abe3aa659bf83773740681151297ada1bb211dd",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xf039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316",
"s": "0x5a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1"
}
],
"storageTrace": {
"rootBefore": "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
"rootAfter": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"proofs": {
"0x01bae6BF68E9A03Fb2bc0615b1bf0d69ce9411eD": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0xc0c4C8bAEA3f6Acb49b6E1fb9e2ADEcEeaCB0cA2": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
]
}
},
"executionResults": [
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
},
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
}
],
"mptwitness": [
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
},
{
"pathPart": "0x3",
"root": "0xaf16fd780a8c7616b95b20da69f4ff26e0253238e996f9516445d6d6bf92b725",
"path": [
{
"value": "0x5bbe97e7e66485b203f9dfea64eb7fa7df06959b12cbde2beba14f8f91133a13",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
},
{
"value": "0x2e591357b02ab3117c35ad94a4e1a724fdbd95d6463da1f6c8017e6d000ecf02",
"sibling": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
{
"value": "0x794953bb5d8aa00f90383ff435ce2ea58e30e1da1061e69455c38496766ec10f",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
}
]
}
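The trace above is the tail of a block-trace fixture: block header fields, two transfer transactions, a storageTrace carrying the before/after state roots and account proofs, per-transaction executionResults, and the mptwitness account paths consumed by the prover. A minimal sketch of decoding such a fixture in Go, assuming only the field names visible above (the repository's real trace types come from its scroll-tech/go-ethereum dependency and differ):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Illustrative subset of the fixture; field names are copied from the JSON
// above, not from the repository's actual type definitions.
type blockTrace struct {
	Transactions []struct {
		TxHash string `json:"txHash"`
		From   string `json:"from"`
		To     string `json:"to"`
		Value  string `json:"value"`
	} `json:"transactions"`
	StorageTrace struct {
		RootBefore string `json:"rootBefore"`
		RootAfter  string `json:"rootAfter"`
	} `json:"storageTrace"`
}

func main() {
	// Path is hypothetical; any of the blockTrace_*.json fixtures would do.
	raw, err := os.ReadFile("coordinator/testdata/blockTrace_02.json")
	if err != nil {
		panic(err)
	}
	var trace blockTrace
	if err := json.Unmarshal(raw, &trace); err != nil {
		panic(err)
	}
	fmt.Println(len(trace.Transactions), trace.StorageTrace.RootBefore, "->", trace.StorageTrace.RootAfter)
}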

coordinator/testdata/blockTrace_03.json (vendored, normal file)

File diff suppressed because one or more lines are too long

View File

@@ -4,7 +4,7 @@ go 1.19
require (
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.7
github.com/lib/pq v1.10.9
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230613025759-f055f50f9d56
github.com/stretchr/testify v1.8.3
@@ -14,11 +14,11 @@ require (
require (
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-sqlite3 v1.14.14 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
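All three bumps in this go.mod hunk are patch releases of database drivers (lib/pq 1.10.7 → 1.10.9, go-sql-driver/mysql 1.7.0 → 1.7.1, go-sqlite3 1.14.14 → 1.14.16), so no call sites change; the go.sum hunks that follow just record the matching checksums. For context, a minimal sketch of how a database/sql driver such as lib/pq is consumed — illustrative only, not the repository's code, and the DSN is a placeholder:

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // blank import registers the "postgres" driver (v1.10.9 after this bump)
)

func main() {
	// Placeholder DSN; the real connection string comes from configuration.
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := db.Ping(); err != nil { // Open is lazy; Ping actually dials.
		log.Fatal(err)
	}
	log.Println("connected")
}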

View File

@@ -4,8 +4,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
@@ -20,13 +20,13 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=

View File

@@ -42,7 +42,6 @@ github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VY
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
github.com/ClickHouse/clickhouse-go/v2 v2.2.0 h1:dj00TDKY+xwuTJdbpspCSmTLFyWzRJerTHwaBxut1C0=
github.com/ClickHouse/clickhouse-go/v2 v2.2.0/go.mod h1:8f2XZUi7XoeU+uPIytSi1cvx8fmJxi7vIgqpvYTF1+o=
@@ -431,6 +430,7 @@ github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=
github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U=
@@ -519,7 +519,6 @@ golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -530,7 +529,6 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BG
golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View File

@@ -5,16 +5,16 @@ import (
"math/big"
"testing"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/database/migrate"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/prover-stats-api/internal/config"
"scroll-tech/prover-stats-api/internal/orm"
)

View File

@@ -7,6 +7,12 @@ import (
"io"
"math/big"
"net/http"
"testing"
"github.com/gin-gonic/gin"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/database/migrate"
@@ -14,19 +20,11 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/types"
"github.com/gin-gonic/gin"
"testing"
"scroll-tech/prover-stats-api/internal/config"
"scroll-tech/prover-stats-api/internal/controller"
"scroll-tech/prover-stats-api/internal/orm"
"scroll-tech/prover-stats-api/internal/route"
api_types "scroll-tech/prover-stats-api/internal/types"
"github.com/shopspring/decimal"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
apitypes "scroll-tech/prover-stats-api/internal/types"
)
var (
@@ -113,10 +111,10 @@ func getResp(t *testing.T, url string) interface{} {
byt, err := io.ReadAll(resp.Body)
assert.NoError(t, err)
res := new(api_types.Response)
res := new(apitypes.Response)
assert.NoError(t, json.Unmarshal(byt, res))
t.Log("----byt is ", string(byt))
assert.Equal(t, api_types.Success, res.ErrCode)
assert.Equal(t, apitypes.Success, res.ErrCode)
return res.Data
}
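Besides regrouping the imports, this hunk renames the alias api_types to apitypes, matching the Go convention that import aliases avoid underscores, while keeping getResp's read–unmarshal–assert pattern intact. A self-contained sketch of that pattern follows; Response and Success are stand-ins for scroll-tech/prover-stats-api/internal/types, with assumed field names:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// Stand-in for the internal/types response envelope; field names assumed.
type Response struct {
	ErrCode int         `json:"errcode"`
	ErrMsg  string      `json:"errmsg"`
	Data    interface{} `json:"data"`
}

const Success = 0

func main() {
	// Tiny server standing in for the prover-stats API under test.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		json.NewEncoder(w).Encode(Response{ErrCode: Success, Data: "ok"})
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	byt, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	res := new(Response)
	if err := json.Unmarshal(byt, res); err != nil {
		panic(err)
	}
	fmt.Println(res.ErrCode == Success, res.Data)
}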

View File

@@ -33,7 +33,7 @@ var (
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
bridgeApp = bcmd.NewBridgeApp(base, "../../bridge/conf/config.json")
coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/config.json")
coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json")
rollerApp = rapp.NewRollerApp(base, "../../roller/config.json", coordinatorApp.WSEndpoint())
m.Run()
bridgeApp.Free()