mirror of
https://github.com/scroll-tech/scroll.git
synced 2026-04-23 03:00:50 -04:00
feat: resolve conflict
This commit is contained in:
@@ -7,11 +7,11 @@ require (
|
||||
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/kataras/iris/v12 v12.2.0
|
||||
github.com/lib/pq v1.10.7
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/mattn/go-colorable v0.1.13
|
||||
github.com/mattn/go-isatty v0.0.18
|
||||
github.com/modern-go/reflect2 v1.0.2
|
||||
github.com/pressly/goose/v3 v3.7.0
|
||||
github.com/pressly/goose/v3 v3.11.2
|
||||
github.com/stretchr/testify v1.8.2
|
||||
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
|
||||
)
|
||||
@@ -44,7 +44,6 @@ require (
|
||||
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
|
||||
github.com/deepmap/oapi-codegen v1.8.2 // indirect
|
||||
github.com/docker/docker v20.10.21+incompatible // indirect
|
||||
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
|
||||
github.com/fatih/structs v1.1.0 // indirect
|
||||
@@ -54,7 +53,6 @@ require (
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/getsentry/sentry-go v0.18.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-sql-driver/mysql v1.7.0 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/gobwas/httphead v0.1.0 // indirect
|
||||
github.com/gobwas/pool v0.2.1 // indirect
|
||||
@@ -85,13 +83,13 @@ require (
|
||||
github.com/kataras/pio v0.0.11 // indirect
|
||||
github.com/kataras/sitemap v0.0.6 // indirect
|
||||
github.com/kataras/tunnel v0.0.4 // indirect
|
||||
github.com/klauspost/compress v1.16.0 // indirect
|
||||
github.com/klauspost/compress v1.16.5 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/mailgun/raymond/v2 v2.0.48 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.14 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.16 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mediocregopher/radix/v3 v3.8.1 // indirect
|
||||
github.com/microcosm-cc/bluemonday v1.0.23 // indirect
|
||||
@@ -141,7 +139,6 @@ require (
|
||||
golang.org/x/sys v0.9.0 // indirect
|
||||
golang.org/x/text v0.10.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.8.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
|
||||
google.golang.org/protobuf v1.29.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
|
||||
@@ -97,9 +97,9 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE=
|
||||
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
|
||||
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
|
||||
github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
@@ -144,8 +144,7 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
|
||||
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
@@ -286,8 +285,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
|
||||
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
|
||||
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
|
||||
@@ -303,8 +302,8 @@ github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awS
|
||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
|
||||
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw=
|
||||
github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
|
||||
@@ -333,8 +332,8 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-sqlite3 v1.14.14 h1:qZgc/Rwetq+MtyE18WhzjokPD93dNqLGNT3QJuLvBGw=
|
||||
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
@@ -406,8 +405,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pressly/goose/v3 v3.7.0 h1:jblaZul15uCIEKHRu5KUdA+5wDA7E60JC0TOthdrtf8=
|
||||
github.com/pressly/goose/v3 v3.7.0/go.mod h1:N5gqPdIzdxf3BiPWdmoPreIwHStkxsvKWE5xjUvfYNk=
|
||||
github.com/pressly/goose/v3 v3.11.2 h1:QgTP45FhBBHdmf7hWKlbWFHtwPtxo0phSDkwDKGUrYs=
|
||||
github.com/pressly/goose/v3 v3.11.2/go.mod h1:LWQzSc4vwfHA/3B8getTp8g3J5Z8tFBxgxinmGlMlJk=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -417,7 +416,7 @@ github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8u
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
@@ -661,7 +660,6 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
||||
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -726,16 +724,16 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
|
||||
modernc.org/cc/v3 v3.36.1 h1:CICrjwr/1M4+6OQ4HJZ/AHxjcwe67r5vPUF518MkO8A=
|
||||
modernc.org/ccgo/v3 v3.16.8 h1:G0QNlTqI5uVgczBWfGKs7B++EPwCfXPWGD2MdeKloDs=
|
||||
modernc.org/libc v1.16.19 h1:S8flPn5ZeXx6iw/8yNa986hwTQDrY8RXU7tObZuAozo=
|
||||
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
|
||||
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
|
||||
lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
|
||||
modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=
|
||||
modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=
|
||||
modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE=
|
||||
modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
|
||||
modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds=
|
||||
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
|
||||
modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
|
||||
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
|
||||
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
|
||||
modernc.org/sqlite v1.22.1 h1:P2+Dhp5FR1RlVRkQ3dDfCiv3Ok8XPxqpe70IjYVA9oE=
|
||||
modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs=
|
||||
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
||||
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
|
||||
|
||||
@@ -75,6 +75,10 @@ linters-settings:
|
||||
# report about shadowed variables
|
||||
check-shadowing: true
|
||||
|
||||
gosec:
|
||||
disable:
|
||||
- G108
|
||||
|
||||
golint:
|
||||
# minimal confidence for issues, default is 0.8
|
||||
min-confidence: 0.8
|
||||
@@ -220,7 +224,12 @@ issues:
|
||||
- lll
|
||||
source: "^//go:generate "
|
||||
text: "long-lines"
|
||||
|
||||
|
||||
# Exclude gosec issues for G108: Profiling endpoint is automatically exposed
|
||||
- linters:
|
||||
- gosec
|
||||
text: "G108"
|
||||
|
||||
- linters:
|
||||
- wsl
|
||||
text: "return statements should not be cuddled if block has more than two lines"
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
@@ -65,8 +67,12 @@ func (i *ImgDB) Stop() error {
|
||||
if i.id == "" {
|
||||
i.id = GetContainerID(i.name)
|
||||
}
|
||||
timeout := time.Second * 3
|
||||
if err := cli.ContainerStop(ctx, i.id, &timeout); err != nil {
|
||||
|
||||
timeoutSec := 3
|
||||
timeout := container.StopOptions{
|
||||
Timeout: &timeoutSec,
|
||||
}
|
||||
if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
// remove the stopped container.
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
|
||||
@@ -135,8 +137,11 @@ func (i *ImgGeth) Stop() error {
|
||||
// check if container is running, stop the running container.
|
||||
id := GetContainerID(i.name)
|
||||
if id != "" {
|
||||
timeout := time.Second * 3
|
||||
if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
|
||||
timeoutSec := 3
|
||||
timeout := container.StopOptions{
|
||||
Timeout: &timeoutSec,
|
||||
}
|
||||
if err := cli.ContainerStop(ctx, id, timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
i.id = id
|
||||
|
||||
@@ -3,9 +3,9 @@ module scroll-tech/common
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/docker/docker v20.10.21+incompatible
|
||||
github.com/docker/docker v23.0.6+incompatible
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/lib/pq v1.10.7
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/mattn/go-colorable v0.1.13
|
||||
github.com/mattn/go-isatty v0.0.18
|
||||
github.com/modern-go/reflect2 v1.0.2
|
||||
@@ -18,7 +18,8 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
@@ -36,7 +37,7 @@ require (
|
||||
github.com/go-kit/kit v0.9.0 // indirect
|
||||
github.com/go-logfmt/logfmt v0.5.1 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-sql-driver/mysql v1.7.0 // indirect
|
||||
github.com/go-sql-driver/mysql v1.7.1 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
|
||||
@@ -62,16 +63,16 @@ require (
|
||||
github.com/julienschmidt/httprouter v1.3.0 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.14 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.16 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/pointerstructure v1.2.0 // indirect
|
||||
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/onsi/ginkgo v1.16.5 // indirect
|
||||
github.com/onsi/gomega v1.27.1 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
|
||||
github.com/opentracing/opentracing-go v1.1.0 // indirect
|
||||
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
@@ -84,7 +85,6 @@ require (
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/scroll-tech/zktrie v0.5.3 // indirect
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/status-im/keycard-go v0.2.0 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.11 // indirect
|
||||
|
||||
@@ -19,8 +19,8 @@ test:
|
||||
|
||||
libzkp:
|
||||
cd ../common/libzkp/impl && cargo clean && cargo build --release && cp ./target/release/libzkp.so ../interface/
|
||||
rm -rf ./verifier/lib && cp -r ../common/libzkp/interface ./verifier/lib
|
||||
find ../common | grep libzktrie.so | xargs -I{} cp {} ./verifier/lib/
|
||||
rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
|
||||
find ../common | grep libzktrie.so | xargs -I{} cp {} ./internal/logic/verifier/lib
|
||||
|
||||
coordinator: libzkp ## Builds the Coordinator instance.
|
||||
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
|
||||
@@ -29,13 +29,13 @@ mock_coordinator: ## Builds the mocked Coordinator instance.
|
||||
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd
|
||||
|
||||
test-verifier: libzkp
|
||||
go test -tags ffi -timeout 0 -v ./verifier
|
||||
go test -tags ffi -timeout 0 -v ./internal/logic/verifier
|
||||
|
||||
test-gpu-verifier: libzkp
|
||||
go test -tags="gpu ffi" -timeout 0 -v ./verifier
|
||||
go test -tags="gpu ffi" -timeout 0 -v ./internal/logic/verifier
|
||||
|
||||
lint: ## Lint the files - used for CI
|
||||
cp -r ../common/libzkp/interface ./verifier/lib
|
||||
cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
|
||||
GOBIN=$(PWD)/build/bin go run ../build/lint.go
|
||||
|
||||
clean: ## Empty out the bin folder
|
||||
@@ -45,4 +45,4 @@ docker:
|
||||
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/coordinator.Dockerfile
|
||||
|
||||
docker_push:
|
||||
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}
|
||||
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}
|
||||
@@ -1,129 +0,0 @@
|
||||
package coordinator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types/message"
|
||||
)
|
||||
|
||||
var (
|
||||
coordinatorRollersDisconnectsTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/rollers/disconnects/total", metrics.ScrollRegistry)
|
||||
)
|
||||
|
||||
// RollerAPI for rollers inorder to register and submit proof
|
||||
type RollerAPI interface {
|
||||
RequestToken(authMsg *message.AuthMsg) (string, error)
|
||||
Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error)
|
||||
SubmitProof(proof *message.ProofMsg) error
|
||||
}
|
||||
|
||||
// RequestToken generates and sends back register token for roller
|
||||
func (m *Manager) RequestToken(authMsg *message.AuthMsg) (string, error) {
|
||||
if ok, err := authMsg.Verify(); !ok {
|
||||
if err != nil {
|
||||
log.Error("failed to verify auth message", "error", err)
|
||||
}
|
||||
return "", errors.New("signature verification failed")
|
||||
}
|
||||
pubkey, _ := authMsg.PublicKey()
|
||||
if token, ok := m.tokenCache.Get(pubkey); ok {
|
||||
return token.(string), nil
|
||||
}
|
||||
token, err := message.GenerateToken()
|
||||
if err != nil {
|
||||
return "", errors.New("token generation failed")
|
||||
}
|
||||
m.tokenCache.Set(pubkey, token, cache.DefaultExpiration)
|
||||
return token, nil
|
||||
}
|
||||
|
||||
// Register register api for roller
|
||||
func (m *Manager) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
|
||||
// Verify register message.
|
||||
if ok, err := authMsg.Verify(); !ok {
|
||||
if err != nil {
|
||||
log.Error("failed to verify auth message", "error", err)
|
||||
}
|
||||
return nil, errors.New("signature verification failed")
|
||||
}
|
||||
pubkey, _ := authMsg.PublicKey()
|
||||
|
||||
// Lock here to avoid malicious roller message replay before cleanup of token
|
||||
m.registerMu.Lock()
|
||||
if ok, err := m.VerifyToken(authMsg); !ok {
|
||||
m.registerMu.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
// roller successfully registered, remove token associated with this roller
|
||||
m.tokenCache.Delete(pubkey)
|
||||
m.registerMu.Unlock()
|
||||
|
||||
// create or get the roller message channel
|
||||
taskCh, err := m.register(pubkey, authMsg.Identity)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
notifier, supported := rpc.NotifierFromContext(ctx)
|
||||
if !supported {
|
||||
return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
|
||||
}
|
||||
rpcSub := notifier.CreateSubscription()
|
||||
go func() {
|
||||
defer func() {
|
||||
m.freeRoller(pubkey)
|
||||
log.Info("roller unregister", "name", authMsg.Identity.Name, "pubkey", pubkey)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case task := <-taskCh:
|
||||
notifier.Notify(rpcSub.ID, task) //nolint
|
||||
case err := <-rpcSub.Err():
|
||||
coordinatorRollersDisconnectsTotalCounter.Inc(1)
|
||||
log.Warn("client stopped the ws connection", "name", authMsg.Identity.Name, "pubkey", pubkey, "err", err)
|
||||
return
|
||||
case <-notifier.Closed():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
log.Info("roller register", "name", authMsg.Identity.Name, "pubkey", pubkey, "version", authMsg.Identity.Version)
|
||||
|
||||
return rpcSub, nil
|
||||
}
|
||||
|
||||
// SubmitProof roller pull proof
|
||||
func (m *Manager) SubmitProof(proof *message.ProofMsg) error {
|
||||
// Verify the signature
|
||||
if ok, err := proof.Verify(); !ok {
|
||||
if err != nil {
|
||||
log.Error("failed to verify proof message", "error", err)
|
||||
}
|
||||
return errors.New("auth signature verify fail")
|
||||
}
|
||||
|
||||
pubkey, _ := proof.PublicKey()
|
||||
// Only allow registered pub-key.
|
||||
if !m.existTaskIDForRoller(pubkey, proof.ID) {
|
||||
return fmt.Errorf("the roller or session id doesn't exist, pubkey: %s, ID: %s", pubkey, proof.ID)
|
||||
}
|
||||
|
||||
m.updateMetricRollerProofsLastFinishedTimestampGauge(pubkey)
|
||||
|
||||
err := m.handleZkProof(pubkey, proof.ProofDetail)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer m.freeTaskIDForRoller(pubkey, proof.ID)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
package coordinator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
)
|
||||
|
||||
// RollerDebugAPI roller api interface in order go get debug message.
|
||||
type RollerDebugAPI interface {
|
||||
// ListRollers returns all live rollers
|
||||
ListRollers() ([]*RollerInfo, error)
|
||||
// GetSessionInfo returns the session information given the session id.
|
||||
GetSessionInfo(sessionID string) (*SessionInfo, error)
|
||||
}
|
||||
|
||||
// RollerInfo records the roller name, pub key and active session info (id, start time).
|
||||
type RollerInfo struct {
|
||||
Name string `json:"name"`
|
||||
Version string `json:"version"`
|
||||
PublicKey string `json:"public_key"`
|
||||
ActiveSession string `json:"active_session,omitempty"`
|
||||
ActiveSessionStartTime time.Time `json:"active_session_start_time"` // latest proof start time.
|
||||
}
|
||||
|
||||
// SessionInfo records proof create or proof verify failed session.
|
||||
type SessionInfo struct {
|
||||
ID string `json:"id"`
|
||||
Status string `json:"status"`
|
||||
StartTime time.Time `json:"start_time"`
|
||||
FinishTime time.Time `json:"finish_time,omitempty"` // set to 0 if not finished
|
||||
AssignedRollers []string `json:"assigned_rollers,omitempty"` // roller name list
|
||||
Error string `json:"error,omitempty"` // empty string if no error encountered
|
||||
}
|
||||
|
||||
// ListRollers returns all live rollers.
|
||||
func (m *Manager) ListRollers() ([]*RollerInfo, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
var res []*RollerInfo
|
||||
for _, pk := range m.rollerPool.Keys() {
|
||||
node, exist := m.rollerPool.Get(pk)
|
||||
if !exist {
|
||||
continue
|
||||
}
|
||||
roller := node.(*rollerNode)
|
||||
info := &RollerInfo{
|
||||
Name: roller.Name,
|
||||
Version: roller.Version,
|
||||
PublicKey: pk,
|
||||
}
|
||||
for id, sess := range m.sessions {
|
||||
for _, proverTask := range sess.proverTasks {
|
||||
if proverTask.ProverPublicKey == pk {
|
||||
info.ActiveSessionStartTime = proverTask.CreatedAt
|
||||
info.ActiveSession = id
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
res = append(res, info)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func newSessionInfo(sess *session, status types.ProvingStatus, errMsg string, finished bool) *SessionInfo {
|
||||
now := time.Now()
|
||||
var nameList []string
|
||||
for _, proverTask := range sess.proverTasks {
|
||||
nameList = append(nameList, proverTask.ProverName)
|
||||
}
|
||||
info := SessionInfo{
|
||||
ID: sess.taskID,
|
||||
Status: status.String(),
|
||||
AssignedRollers: nameList,
|
||||
StartTime: sess.proverTasks[0].CreatedAt,
|
||||
Error: errMsg,
|
||||
}
|
||||
if finished {
|
||||
info.FinishTime = now
|
||||
}
|
||||
return &info
|
||||
}
|
||||
|
||||
// GetSessionInfo returns the session information given the session id.
|
||||
func (m *Manager) GetSessionInfo(sessionID string) (*SessionInfo, error) {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
if info, ok := m.failedSessionInfos[sessionID]; ok {
|
||||
return info, nil
|
||||
}
|
||||
if s, ok := m.sessions[sessionID]; ok {
|
||||
return newSessionInfo(s, types.ProvingTaskAssigned, "", false), nil
|
||||
}
|
||||
return nil, fmt.Errorf("no such session, sessionID: %s", sessionID)
|
||||
}
|
||||
@@ -1,221 +0,0 @@
|
||||
package coordinator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
cmap "github.com/orcaman/concurrent-map"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
)
|
||||
|
||||
func geneAuthMsg(t *testing.T) *message.AuthMsg {
|
||||
authMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "roller_test1",
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
privKey, err := crypto.GenerateKey()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, authMsg.SignWithKey(privKey))
|
||||
return authMsg
|
||||
}
|
||||
|
||||
var rollerManager *Manager
|
||||
|
||||
func init() {
|
||||
rmConfig := config.RollerManagerConfig{}
|
||||
rmConfig.Verifier = &config.VerifierConfig{MockMode: true}
|
||||
rollerManager, _ = New(context.Background(), &rmConfig, nil)
|
||||
}
|
||||
|
||||
func TestManager_RequestToken(t *testing.T) {
|
||||
convey.Convey("auth msg verify failure", t, func() {
|
||||
tmpAuthMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "roller_test_request_token",
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
token, err := rollerManager.RequestToken(tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, token)
|
||||
})
|
||||
|
||||
convey.Convey("token has already been distributed", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
key, _ := tmpAuthMsg.PublicKey()
|
||||
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
|
||||
rollerManager.tokenCache.Set(key, tokenCacheStored, time.Hour)
|
||||
token, err := rollerManager.RequestToken(tmpAuthMsg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, token, tokenCacheStored)
|
||||
})
|
||||
|
||||
convey.Convey("token generation failure", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
|
||||
return "", errors.New("token generation failed")
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
token, err := rollerManager.RequestToken(tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, token)
|
||||
})
|
||||
|
||||
convey.Convey("token generation success", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
|
||||
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
|
||||
return tokenCacheStored, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
token, err := rollerManager.RequestToken(tmpAuthMsg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tokenCacheStored, token)
|
||||
})
|
||||
}
|
||||
|
||||
func TestManager_Register(t *testing.T) {
|
||||
convey.Convey("auth msg verify failure", t, func() {
|
||||
tmpAuthMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "roller_test_register",
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("verify token failure", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return false, errors.New("verify token failure")
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("register failure", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
patchGuard.ApplyPrivateMethod(rollerManager, "register", func(*Manager, string, *message.Identity) (<-chan *message.TaskMsg, error) {
|
||||
return nil, errors.New("register error")
|
||||
})
|
||||
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("notifier failure", t, func() {
|
||||
tmpAuthMsg := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyMethodFunc(rollerManager, "VerifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
patchGuard.ApplyFunc(rpc.NotifierFromContext, func(ctx context.Context) (*rpc.Notifier, bool) {
|
||||
return nil, false
|
||||
})
|
||||
subscription, err := rollerManager.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err, rpc.ErrNotificationsUnsupported)
|
||||
assert.Equal(t, *subscription, rpc.Subscription{})
|
||||
})
|
||||
}
|
||||
|
||||
func TestManager_SubmitProof(t *testing.T) {
|
||||
id := "10000"
|
||||
proof := &message.ProofMsg{
|
||||
ProofDetail: &message.ProofDetail{
|
||||
ID: id,
|
||||
Status: message.StatusOk,
|
||||
Proof: &message.AggProof{},
|
||||
},
|
||||
}
|
||||
|
||||
var rp rollerNode
|
||||
rp.TaskIDs = cmap.New()
|
||||
rp.TaskIDs.Set(id, id)
|
||||
|
||||
convey.Convey("verify failure", t, func() {
|
||||
var s *message.ProofMsg
|
||||
patchGuard := gomonkey.ApplyMethodFunc(s, "Verify", func() (bool, error) {
|
||||
return false, errors.New("proof verify error")
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
err := rollerManager.SubmitProof(proof)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
convey.Convey("existTaskIDForRoller failure", t, func() {
|
||||
var s *cmap.ConcurrentMap
|
||||
patchGuard := gomonkey.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
|
||||
return nil, true
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var pm *message.ProofMsg
|
||||
patchGuard.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
err := rollerManager.SubmitProof(proof)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
convey.Convey("handleZkProof failure", t, func() {
|
||||
var pm *message.ProofMsg
|
||||
patchGuard := gomonkey.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var s cmap.ConcurrentMap
|
||||
patchGuard.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
|
||||
return &rp, true
|
||||
})
|
||||
|
||||
patchGuard.ApplyPrivateMethod(rollerManager, "handleZkProof", func(manager *Manager, pk string, msg *message.ProofDetail) error {
|
||||
return errors.New("handle zk proof error")
|
||||
})
|
||||
|
||||
err := rollerManager.SubmitProof(proof)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
convey.Convey("SubmitProof success", t, func() {
|
||||
var pm *message.ProofMsg
|
||||
patchGuard := gomonkey.ApplyMethodFunc(pm, "Verify", func() (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var s cmap.ConcurrentMap
|
||||
patchGuard.ApplyMethodFunc(s, "Get", func(key string) (interface{}, bool) {
|
||||
return &rp, true
|
||||
})
|
||||
|
||||
patchGuard.ApplyPrivateMethod(rollerManager, "handleZkProof", func(manager *Manager, pk string, msg *message.ProofDetail) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
err := rollerManager.SubmitProof(proof)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
@@ -6,6 +6,9 @@ import (
|
||||
"os"
|
||||
"os/signal"
|
||||
|
||||
// enable the pprof
|
||||
_ "net/http/pprof"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
@@ -14,8 +17,10 @@ import (
|
||||
"scroll-tech/common/utils"
|
||||
"scroll-tech/common/version"
|
||||
|
||||
"scroll-tech/coordinator"
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/controller/api"
|
||||
"scroll-tech/coordinator/internal/controller/cron"
|
||||
"scroll-tech/coordinator/internal/logic/rollermanager"
|
||||
)
|
||||
|
||||
var app *cli.App
|
||||
@@ -37,44 +42,34 @@ func init() {
|
||||
}
|
||||
|
||||
func action(ctx *cli.Context) error {
|
||||
// Load config file.
|
||||
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
|
||||
cfg, err := config.NewConfig(cfgFile)
|
||||
if err != nil {
|
||||
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
// init db handler
|
||||
// Start metrics server.
|
||||
metrics.Serve(context.Background(), ctx)
|
||||
|
||||
subCtx, cancel := context.WithCancel(ctx.Context)
|
||||
db, err := database.InitDB(cfg.DBConfig)
|
||||
if err != nil {
|
||||
log.Crit("failed to init db connection", "err", err)
|
||||
}
|
||||
|
||||
proofCollector := cron.NewCollector(subCtx, db, cfg)
|
||||
|
||||
rollermanager.InitRollerManager()
|
||||
|
||||
defer func() {
|
||||
proofCollector.Stop()
|
||||
cancel()
|
||||
if err = database.CloseDB(db); err != nil {
|
||||
log.Error("can not close ormFactory", "error", err)
|
||||
log.Error("can not close db connection", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
subCtx, cancel := context.WithCancel(ctx.Context)
|
||||
// Initialize all coordinator modules.
|
||||
rollerManager, err := coordinator.New(subCtx, cfg.RollerManagerConfig, db)
|
||||
defer func() {
|
||||
cancel()
|
||||
rollerManager.Stop()
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Start metrics server.
|
||||
metrics.Serve(subCtx, ctx)
|
||||
|
||||
// Start all modules.
|
||||
if err = rollerManager.Start(); err != nil {
|
||||
log.Crit("couldn't start roller manager", "error", err)
|
||||
}
|
||||
|
||||
apis := rollerManager.APIs()
|
||||
apis := api.APIs(cfg, db)
|
||||
// Register api and start rpc service.
|
||||
if ctx.Bool(httpEnabledFlag.Name) {
|
||||
handler, addr, err := utils.StartHTTPEndpoint(fmt.Sprintf("%s:%d", ctx.String(httpListenAddrFlag.Name), ctx.Int(httpPortFlag.Name)), apis)
|
||||
@@ -89,8 +84,7 @@ func action(ctx *cli.Context) error {
|
||||
}
|
||||
// Register api and start ws service.
|
||||
if ctx.Bool(wsEnabledFlag.Name) {
|
||||
handler, addr, err := utils.StartWSEndpoint(fmt.Sprintf("%s:%d", ctx.String(wsListenAddrFlag.Name), ctx.Int(wsPortFlag.Name)),
|
||||
apis, cfg.RollerManagerConfig.CompressionLevel)
|
||||
handler, addr, err := utils.StartWSEndpoint(fmt.Sprintf("%s:%d", ctx.String(wsListenAddrFlag.Name), ctx.Int(wsPortFlag.Name)), apis, cfg.CompressionLevel)
|
||||
if err != nil {
|
||||
log.Crit("Could not start WS api", "error", err)
|
||||
}
|
||||
|
||||
@@ -17,9 +17,7 @@ import (
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
wsStartPort int64 = 40000
|
||||
)
|
||||
var wsStartPort int64 = 40000
|
||||
|
||||
// CoordinatorApp coordinator-test client manager.
|
||||
type CoordinatorApp struct {
|
||||
@@ -99,5 +97,5 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(c.coordinatorFile, data, 0600)
|
||||
return os.WriteFile(c.coordinatorFile, data, 0o600)
|
||||
}
|
||||
|
||||
@@ -15,26 +15,6 @@ const (
|
||||
defaultNumberOfSessionRetryAttempts = 2
|
||||
)
|
||||
|
||||
// RollerManagerConfig loads sequencer configuration items.
|
||||
type RollerManagerConfig struct {
|
||||
CompressionLevel int `json:"compression_level,omitempty"`
|
||||
// asc or desc (default: asc)
|
||||
OrderSession string `json:"order_session,omitempty"`
|
||||
// The amount of rollers to pick per proof generation session.
|
||||
RollersPerSession uint8 `json:"rollers_per_session"`
|
||||
// Number of attempts that a session can be retried if previous attempts failed.
|
||||
// Currently we only consider proving timeout as failure here.
|
||||
SessionAttempts uint8 `json:"session_attempts,omitempty"`
|
||||
// Zk verifier config.
|
||||
Verifier *VerifierConfig `json:"verifier,omitempty"`
|
||||
// Proof collection time (in minutes).
|
||||
CollectionTime int `json:"collection_time"`
|
||||
// Token time to live (in seconds)
|
||||
TokenTimeToLive int `json:"token_time_to_live"`
|
||||
// Max number of workers in verifier worker pool
|
||||
MaxVerifierWorkers int `json:"max_verifier_workers,omitempty"`
|
||||
}
|
||||
|
||||
// L2Config loads l2geth configuration items.
|
||||
type L2Config struct {
|
||||
// l2geth node url.
|
||||
@@ -43,9 +23,25 @@ type L2Config struct {
|
||||
|
||||
// Config load configuration items.
|
||||
type Config struct {
|
||||
RollerManagerConfig *RollerManagerConfig `json:"roller_manager_config"`
|
||||
DBConfig *database.Config `json:"db_config"`
|
||||
L2Config *L2Config `json:"l2_config"`
|
||||
DBConfig *database.Config `json:"db_config"`
|
||||
L2Config *L2Config `json:"l2_config"`
|
||||
|
||||
CompressionLevel int `json:"compression_level,omitempty"`
|
||||
// asc or desc (default: asc)
|
||||
OrderSession string `json:"order_session,omitempty"`
|
||||
// The amount of rollers to pick per proof generation session.
|
||||
RollersPerSession uint8 `json:"rollers_per_session"`
|
||||
// Number of attempts that a session can be retried if previous attempts failed.
|
||||
// Currently we only consider proving timeout as failure here.
|
||||
SessionAttempts int `json:"session_attempts,omitempty"`
|
||||
// Zk verifier config.
|
||||
Verifier *VerifierConfig `json:"verifier,omitempty"`
|
||||
// Proof collection time (in minutes).
|
||||
CollectionTime int `json:"collection_time"`
|
||||
// Token time to live (in seconds)
|
||||
TokenTimeToLive int `json:"token_time_to_live"`
|
||||
// Max number of workers in verifier worker pool
|
||||
MaxVerifierWorkers int `json:"max_verifier_workers,omitempty"`
|
||||
}
|
||||
|
||||
// VerifierConfig load zk verifier config.
|
||||
@@ -69,17 +65,17 @@ func NewConfig(file string) (*Config, error) {
|
||||
}
|
||||
|
||||
// Check roller's order session
|
||||
order := strings.ToUpper(cfg.RollerManagerConfig.OrderSession)
|
||||
order := strings.ToUpper(cfg.OrderSession)
|
||||
if len(order) > 0 && !(order == "ASC" || order == "DESC") {
|
||||
return nil, errors.New("roller config's order session is invalid")
|
||||
}
|
||||
cfg.RollerManagerConfig.OrderSession = order
|
||||
cfg.OrderSession = order
|
||||
|
||||
if cfg.RollerManagerConfig.MaxVerifierWorkers == 0 {
|
||||
cfg.RollerManagerConfig.MaxVerifierWorkers = defaultNumberOfVerifierWorkers
|
||||
if cfg.MaxVerifierWorkers == 0 {
|
||||
cfg.MaxVerifierWorkers = defaultNumberOfVerifierWorkers
|
||||
}
|
||||
if cfg.RollerManagerConfig.SessionAttempts == 0 {
|
||||
cfg.RollerManagerConfig.SessionAttempts = defaultNumberOfSessionRetryAttempts
|
||||
if cfg.SessionAttempts == 0 {
|
||||
cfg.SessionAttempts = defaultNumberOfSessionRetryAttempts
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
|
||||
@@ -60,7 +60,7 @@ func TestConfig(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
|
||||
assert.NoError(t, os.WriteFile(tmpJSON, data, 0644))
|
||||
assert.NoError(t, os.WriteFile(tmpJSON, data, 0o644))
|
||||
|
||||
cfg2, err := NewConfig(tmpJSON)
|
||||
assert.NoError(t, err)
|
||||
@@ -116,7 +116,7 @@ func TestConfig(t *testing.T) {
|
||||
|
||||
cfg, err := NewConfig(tmpFile.Name())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, defaultNumberOfVerifierWorkers, cfg.RollerManagerConfig.MaxVerifierWorkers)
|
||||
assert.Equal(t, defaultNumberOfVerifierWorkers, cfg.MaxVerifierWorkers)
|
||||
})
|
||||
|
||||
t.Run("Default SessionAttempts", func(t *testing.T) {
|
||||
@@ -132,6 +132,6 @@ func TestConfig(t *testing.T) {
|
||||
|
||||
cfg, err := NewConfig(tmpFile.Name())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint8(defaultNumberOfSessionRetryAttempts), cfg.RollerManagerConfig.SessionAttempts)
|
||||
assert.Equal(t, defaultNumberOfSessionRetryAttempts, cfg.SessionAttempts)
|
||||
})
|
||||
}
|
||||
|
||||
106
coordinator/internal/controller/api/roller.go
Normal file
106
coordinator/internal/controller/api/roller.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/proof"
|
||||
)
|
||||
|
||||
// RollerController the roller api controller
|
||||
type RollerController struct {
|
||||
tokenCache *cache.Cache
|
||||
proofReceiver *proof.ZKProofReceiver
|
||||
taskWorker *proof.TaskWorker
|
||||
}
|
||||
|
||||
// NewRollerController create a roller controller
|
||||
func NewRollerController(cfg *config.Config, db *gorm.DB) *RollerController {
|
||||
return &RollerController{
|
||||
proofReceiver: proof.NewZKProofReceiver(cfg, db),
|
||||
taskWorker: proof.NewTaskWorker(),
|
||||
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
|
||||
}
|
||||
}
|
||||
|
||||
// RequestToken get request token of authMsg
|
||||
func (r *RollerController) RequestToken(authMsg *message.AuthMsg) (string, error) {
|
||||
if ok, err := authMsg.Verify(); !ok {
|
||||
if err != nil {
|
||||
log.Error("failed to verify auth message", "error", err)
|
||||
}
|
||||
return "", errors.New("signature verification failed")
|
||||
}
|
||||
pubkey, _ := authMsg.PublicKey()
|
||||
if token, ok := r.tokenCache.Get(pubkey); ok {
|
||||
return token.(string), nil
|
||||
}
|
||||
token, err := message.GenerateToken()
|
||||
if err != nil {
|
||||
return "", errors.New("token generation failed")
|
||||
}
|
||||
r.tokenCache.SetDefault(pubkey, token)
|
||||
return token, nil
|
||||
}
|
||||
|
||||
// VerifyToken verifies pukey for token and expiration time
|
||||
func (r *RollerController) verifyToken(authMsg *message.AuthMsg) (bool, error) {
|
||||
pubkey, _ := authMsg.PublicKey()
|
||||
// GetValue returns nil if value is expired
|
||||
if token, ok := r.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
|
||||
return false, fmt.Errorf("failed to find corresponding token. roller name: %s roller pk: %s", authMsg.Identity.Name, pubkey)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Register register api for roller
|
||||
func (r *RollerController) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
|
||||
// Verify register message.
|
||||
if ok, err := authMsg.Verify(); !ok {
|
||||
if err != nil {
|
||||
log.Error("failed to verify auth message", "error", err)
|
||||
}
|
||||
return nil, errors.New("signature verification failed")
|
||||
}
|
||||
// Lock here to avoid malicious roller message replay before cleanup of token
|
||||
if ok, err := r.verifyToken(authMsg); !ok {
|
||||
return nil, err
|
||||
}
|
||||
pubkey, _ := authMsg.PublicKey()
|
||||
// roller successfully registered, remove token associated with this roller
|
||||
r.tokenCache.Delete(pubkey)
|
||||
|
||||
rpcSub, err := r.taskWorker.AllocTaskWorker(ctx, authMsg)
|
||||
if err != nil {
|
||||
return rpcSub, err
|
||||
}
|
||||
return rpcSub, nil
|
||||
}
|
||||
|
||||
// SubmitProof roller pull proof
|
||||
func (r *RollerController) SubmitProof(proof *message.ProofMsg) error {
|
||||
// Verify the signature
|
||||
if ok, err := proof.Verify(); !ok {
|
||||
if err != nil {
|
||||
log.Error("failed to verify proof message", "error", err)
|
||||
}
|
||||
return errors.New("auth signature verify fail")
|
||||
}
|
||||
|
||||
err := r.proofReceiver.HandleZkProof(context.Background(), proof)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
301
coordinator/internal/controller/api/roller_test.go
Normal file
301
coordinator/internal/controller/api/roller_test.go
Normal file
@@ -0,0 +1,301 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/proof"
|
||||
"scroll-tech/coordinator/internal/logic/rollermanager"
|
||||
"scroll-tech/coordinator/internal/logic/verifier"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
func geneAuthMsg(t *testing.T) (*message.AuthMsg, *ecdsa.PrivateKey) {
|
||||
authMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "roller_test1",
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
privKey, err := crypto.GenerateKey()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, authMsg.SignWithKey(privKey))
|
||||
return authMsg, privKey
|
||||
}
|
||||
|
||||
var rollerController *RollerController
|
||||
|
||||
func init() {
|
||||
conf := &config.Config{
|
||||
TokenTimeToLive: 120,
|
||||
}
|
||||
conf.Verifier = &config.VerifierConfig{MockMode: true}
|
||||
rollerController = NewRollerController(conf, nil)
|
||||
}
|
||||
|
||||
func TestRoller_RequestToken(t *testing.T) {
|
||||
convey.Convey("auth msg verify failure", t, func() {
|
||||
tmpAuthMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "roller_test_request_token",
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
token, err := rollerController.RequestToken(tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, token)
|
||||
})
|
||||
|
||||
convey.Convey("token has already been distributed", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
key, _ := tmpAuthMsg.PublicKey()
|
||||
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
|
||||
rollerController.tokenCache.Set(key, tokenCacheStored, time.Hour)
|
||||
token, err := rollerController.RequestToken(tmpAuthMsg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, token, tokenCacheStored)
|
||||
})
|
||||
|
||||
convey.Convey("token generation failure", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
|
||||
return "", errors.New("token generation failed")
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
token, err := rollerController.RequestToken(tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, token)
|
||||
})
|
||||
|
||||
convey.Convey("token generation success", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
|
||||
patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
|
||||
return tokenCacheStored, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
token, err := rollerController.RequestToken(tmpAuthMsg)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tokenCacheStored, token)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRoller_Register(t *testing.T) {
|
||||
convey.Convey("auth msg verify failure", t, func() {
|
||||
tmpAuthMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: "roller_test_register",
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("verify token failure", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return false, errors.New("verify token failure")
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("notifier failure", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
patchGuard.ApplyFunc(rpc.NotifierFromContext, func(ctx context.Context) (*rpc.Notifier, bool) {
|
||||
return nil, false
|
||||
})
|
||||
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, err, rpc.ErrNotificationsUnsupported)
|
||||
assert.Equal(t, *subscription, rpc.Subscription{})
|
||||
})
|
||||
|
||||
convey.Convey("register failure", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var taskWorker *proof.TaskWorker
|
||||
patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
|
||||
return nil, errors.New("register error")
|
||||
})
|
||||
subscription, err := rollerController.Register(context.Background(), tmpAuthMsg)
|
||||
assert.Error(t, err)
|
||||
assert.Empty(t, subscription)
|
||||
})
|
||||
|
||||
convey.Convey("register success", t, func() {
|
||||
tmpAuthMsg, _ := geneAuthMsg(t)
|
||||
patchGuard := gomonkey.ApplyPrivateMethod(rollerController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var taskWorker *proof.TaskWorker
|
||||
patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
|
||||
return nil, nil
|
||||
})
|
||||
_, err := rollerController.Register(context.Background(), tmpAuthMsg)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRoller_SubmitProof(t *testing.T) {
|
||||
tmpAuthMsg, prvKey := geneAuthMsg(t)
|
||||
pubKey, err := tmpAuthMsg.PublicKey()
|
||||
assert.NoError(t, err)
|
||||
|
||||
id := "rollers_info_test"
|
||||
tmpProof := &message.ProofMsg{
|
||||
ProofDetail: &message.ProofDetail{
|
||||
Type: message.ProofTypeChunk,
|
||||
ID: id,
|
||||
Status: message.StatusOk,
|
||||
Proof: &message.AggProof{},
|
||||
},
|
||||
}
|
||||
assert.NoError(t, tmpProof.Sign(prvKey))
|
||||
proofPubKey, err := tmpProof.PublicKey()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, pubKey, proofPubKey)
|
||||
|
||||
rollermanager.InitRollerManager()
|
||||
|
||||
taskChan, err := rollermanager.Manager.Register(pubKey, tmpAuthMsg.Identity)
|
||||
assert.NotNil(t, taskChan)
|
||||
assert.NoError(t, err)
|
||||
|
||||
convey.Convey("verify failure", t, func() {
|
||||
var s *message.ProofMsg
|
||||
patchGuard := gomonkey.ApplyMethodFunc(s, "Verify", func() (bool, error) {
|
||||
return false, errors.New("proof verify error")
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
err = rollerController.SubmitProof(tmpProof)
|
||||
assert.Error(t, err)
|
||||
})
|
||||
|
||||
var s *message.ProofMsg
|
||||
patchGuard := gomonkey.ApplyMethodFunc(s, "Verify", func() (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
var chunkOrm *orm.Chunk
|
||||
patchGuard.ApplyMethodFunc(chunkOrm, "UpdateProofByHash", func(context.Context, string, *message.AggProof, uint64, ...*gorm.DB) error {
|
||||
return nil
|
||||
})
|
||||
patchGuard.ApplyMethodFunc(chunkOrm, "UpdateProvingStatus", func(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
var batchOrm *orm.Batch
|
||||
patchGuard.ApplyMethodFunc(batchOrm, "UpdateProofByHash", func(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
|
||||
return nil
|
||||
})
|
||||
patchGuard.ApplyMethodFunc(batchOrm, "UpdateProvingStatus", func(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
var proverTaskOrm *orm.ProverTask
|
||||
convey.Convey("get none rollers of prover task", t, func() {
|
||||
patchGuard.ApplyMethodFunc(proverTaskOrm, "GetProverTaskByHashAndPubKey", func(ctx context.Context, hash, pubKey string) (*orm.ProverTask, error) {
|
||||
return nil, nil
|
||||
})
|
||||
tmpProof1 := &message.ProofMsg{
|
||||
ProofDetail: &message.ProofDetail{
|
||||
ID: "10001",
|
||||
Status: message.StatusOk,
|
||||
Proof: &message.AggProof{},
|
||||
},
|
||||
}
|
||||
privKey, err := crypto.GenerateKey()
|
||||
assert.NoError(t, err)
|
||||
tmpProof1.Sign(privKey)
|
||||
_, err1 := tmpProof1.PublicKey()
|
||||
assert.NoError(t, err1)
|
||||
err2 := rollerController.SubmitProof(tmpProof1)
|
||||
fmt.Println(err2)
|
||||
targetErr := fmt.Errorf("validator failure get none rollers for the proof")
|
||||
assert.Equal(t, err2.Error(), targetErr.Error())
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(proverTaskOrm, "GetProverTaskByHashAndPubKey", func(ctx context.Context, hash, pubKey string) (*orm.ProverTask, error) {
|
||||
now := time.Now()
|
||||
s := &orm.ProverTask{
|
||||
TaskID: id,
|
||||
ProverPublicKey: proofPubKey,
|
||||
TaskType: int16(message.ProofTypeChunk),
|
||||
ProverName: "rollers_info_test",
|
||||
ProvingStatus: int16(types.RollerAssigned),
|
||||
CreatedAt: now,
|
||||
}
|
||||
return s, nil
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(proverTaskOrm, "UpdateProverTaskProvingStatus", func(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus, dbTX ...*gorm.DB) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
patchGuard.ApplyPrivateMethod(rollerController.proofReceiver, "proofFailure", func(hash string, pubKey string, proofMsgType message.ProofType) {
|
||||
})
|
||||
|
||||
convey.Convey("proof msg status is not ok", t, func() {
|
||||
tmpProof.Status = message.StatusProofError
|
||||
err1 := rollerController.SubmitProof(tmpProof)
|
||||
assert.NoError(t, err1)
|
||||
})
|
||||
tmpProof.Status = message.StatusOk
|
||||
|
||||
var tmpVerifier *verifier.Verifier
|
||||
convey.Convey("verifier proof failure", t, func() {
|
||||
targetErr := errors.New("verify proof failure")
|
||||
patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyProof", func(proof *message.AggProof) (bool, error) {
|
||||
return false, targetErr
|
||||
})
|
||||
err1 := rollerController.SubmitProof(tmpProof)
|
||||
assert.Nil(t, err1)
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyProof", func(proof *message.AggProof) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyProof", func(proof *message.AggProof) (bool, error) {
|
||||
return true, nil
|
||||
})
|
||||
|
||||
patchGuard.ApplyPrivateMethod(rollerController.proofReceiver, "closeProofTask", func(hash string, pubKey string, proofMsg *message.ProofMsg, rollersInfo *coordinatorType.RollersInfo) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
err1 := rollerController.SubmitProof(tmpProof)
|
||||
assert.Nil(t, err1)
|
||||
}
|
||||
30
coordinator/internal/controller/api/routes.go
Normal file
30
coordinator/internal/controller/api/routes.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
)
|
||||
|
||||
// RollerAPI for rollers inorder to register and submit proof
|
||||
type RollerAPI interface {
|
||||
RequestToken(authMsg *message.AuthMsg) (string, error)
|
||||
Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error)
|
||||
SubmitProof(proof *message.ProofMsg) error
|
||||
}
|
||||
|
||||
// APIs register api for coordinator
|
||||
func APIs(cfg *config.Config, db *gorm.DB) []rpc.API {
|
||||
return []rpc.API{
|
||||
{
|
||||
Namespace: "roller",
|
||||
Service: RollerAPI(NewRollerController(cfg, db)),
|
||||
Public: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
145
coordinator/internal/controller/cron/collect_proof.go
Normal file
145
coordinator/internal/controller/cron/collect_proof.go
Normal file
@@ -0,0 +1,145 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/collector"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
// Collector collect the block batch or agg task to send to prover
|
||||
type Collector struct {
|
||||
cfg *config.Config
|
||||
db *gorm.DB
|
||||
ctx context.Context
|
||||
stopChan chan struct{}
|
||||
|
||||
collectors map[message.ProofType]collector.Collector
|
||||
|
||||
proverTask *orm.ProverTask
|
||||
}
|
||||
|
||||
// NewCollector create a collector to cron collect the data to send to prover
|
||||
func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config) *Collector {
|
||||
c := &Collector{
|
||||
cfg: cfg,
|
||||
db: db,
|
||||
ctx: ctx,
|
||||
stopChan: make(chan struct{}),
|
||||
collectors: make(map[message.ProofType]collector.Collector),
|
||||
proverTask: orm.NewProverTask(db),
|
||||
}
|
||||
|
||||
c.collectors[message.ProofTypeBatch] = collector.NewBatchProofCollector(cfg, db)
|
||||
c.collectors[message.ProofTypeChunk] = collector.NewChunkProofCollector(cfg, db)
|
||||
|
||||
go c.run()
|
||||
go c.timeoutProofTask()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Stop all the collector
|
||||
func (c *Collector) Stop() {
|
||||
c.stopChan <- struct{}{}
|
||||
}
|
||||
|
||||
// run loop and cron collect
|
||||
func (c *Collector) run() {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
nerr := fmt.Errorf("collector panic error:%v", err)
|
||||
log.Warn(nerr.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
ticker := time.NewTicker(time.Second * 2)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
for _, tmpCollector := range c.collectors {
|
||||
if err := tmpCollector.Collect(c.ctx); err != nil {
|
||||
log.Warn("%s collect data to prover failure:%v", tmpCollector.Name(), err)
|
||||
}
|
||||
}
|
||||
case <-c.ctx.Done():
|
||||
if c.ctx.Err() != nil {
|
||||
log.Error("manager context canceled with error", "error", c.ctx.Err())
|
||||
}
|
||||
return
|
||||
case <-c.stopChan:
|
||||
log.Info("the coordinator run loop exit")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// timeoutTask cron check the send task is timeout. if timeout reached, mark
|
||||
func (c *Collector) timeoutProofTask() {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
nerr := fmt.Errorf("timeout proof task panic error:%v", err)
|
||||
log.Warn(nerr.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
ticker := time.NewTicker(time.Second * 2)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
unsignedProverTasks, err := c.proverTask.GetAssignedProverTasks(c.ctx, 10)
|
||||
if err != nil {
|
||||
log.Error("get unassigned session info failure", err)
|
||||
break
|
||||
}
|
||||
|
||||
for _, unsignedProverTask := range unsignedProverTasks {
|
||||
timeoutDuration := time.Duration(c.cfg.CollectionTime) * time.Minute
|
||||
// here not update the block batch proving status failed, because the collector loop
|
||||
// will check the attempt times. if reach the times, the collector will set the block batch
|
||||
// proving status.
|
||||
if time.Since(unsignedProverTask.CreatedAt) >= timeoutDuration {
|
||||
err = c.db.Transaction(func(tx *gorm.DB) error {
|
||||
// update prover task proving status as RollerProofInvalid
|
||||
if err = c.proverTask.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(unsignedProverTask.TaskType),
|
||||
unsignedProverTask.TaskID, unsignedProverTask.ProverPublicKey, types.RollerProofInvalid, tx); err != nil {
|
||||
|
||||
log.Error("update prover task proving status failure", "hash", unsignedProverTask.TaskID, "pubKey", unsignedProverTask.ProverPublicKey, "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// update prover task failure type
|
||||
if err = c.proverTask.UpdateProverTaskFailureType(c.ctx, message.ProofType(unsignedProverTask.TaskType),
|
||||
unsignedProverTask.TaskID, unsignedProverTask.ProverPublicKey, coordinatorType.ProverTaskFailureTypeTimeout, tx); err != nil {
|
||||
|
||||
log.Error("update prover task failure type failure", "hash", unsignedProverTask.TaskID, "pubKey", unsignedProverTask.ProverPublicKey, "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
log.Error("check task proof is timeout failure", "error", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
case <-c.ctx.Done():
|
||||
if c.ctx.Err() != nil {
|
||||
log.Error("manager context canceled with error", "error", c.ctx.Err())
|
||||
}
|
||||
return
|
||||
case <-c.stopChan:
|
||||
log.Info("the coordinator run loop exit")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
112
coordinator/internal/logic/collector/batch_proof_collector.go
Normal file
112
coordinator/internal/logic/collector/batch_proof_collector.go
Normal file
@@ -0,0 +1,112 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/rollermanager"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
// BatchProofCollector is collector implement for batch proof
type BatchProofCollector struct {
	BaseCollector // shared ORM handles, config and task-dispatch helpers

	db *gorm.DB // used to open the assignment transaction in Collect
}
|
||||
|
||||
// NewBatchProofCollector new a batch collector
|
||||
func NewBatchProofCollector(cfg *config.Config, db *gorm.DB) *BatchProofCollector {
|
||||
ac := &BatchProofCollector{
|
||||
db: db,
|
||||
BaseCollector: BaseCollector{
|
||||
cfg: cfg,
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
batchOrm: orm.NewBatch(db),
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
},
|
||||
}
|
||||
return ac
|
||||
}
|
||||
|
||||
// Name return the batch proof collector name
|
||||
func (ac *BatchProofCollector) Name() string {
|
||||
return BatchCollectorName
|
||||
}
|
||||
|
||||
// Collect load and send batch tasks
|
||||
func (ac *BatchProofCollector) Collect(ctx context.Context) error {
|
||||
batchTasks, err := ac.batchOrm.GetUnassignedBatches(ctx, 1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get unassigned batch proving tasks, error:%w", err)
|
||||
}
|
||||
|
||||
if len(batchTasks) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(batchTasks) != 1 {
|
||||
return fmt.Errorf("get unassigned batch proving task len not 1")
|
||||
}
|
||||
|
||||
batchTask := batchTasks[0]
|
||||
log.Info("start batch proof generation session", "id", batchTask.Hash)
|
||||
|
||||
if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch) == 0 {
|
||||
return fmt.Errorf("no idle common roller when starting proof generation session, id:%s", batchTask.Hash)
|
||||
}
|
||||
|
||||
if !ac.checkAttemptsExceeded(batchTask.Hash) {
|
||||
return fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
|
||||
}
|
||||
|
||||
rollerStatusList, err := ac.sendTask(ctx, batchTask.Hash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("send batch task id:%s err:%w", batchTask.Hash, err)
|
||||
}
|
||||
|
||||
transErr := ac.db.Transaction(func(tx *gorm.DB) error {
|
||||
// Update session proving status as assigned.
|
||||
if err = ac.batchOrm.UpdateProvingStatus(ctx, batchTask.Hash, types.ProvingTaskAssigned); err != nil {
|
||||
return fmt.Errorf("failed to update task status, id:%s, error:%w", batchTask.Hash, err)
|
||||
}
|
||||
|
||||
for _, rollerStatus := range rollerStatusList {
|
||||
proverTask := orm.ProverTask{
|
||||
TaskID: batchTask.Hash,
|
||||
ProverPublicKey: rollerStatus.PublicKey,
|
||||
TaskType: int16(message.ProofTypeBatch),
|
||||
ProverName: rollerStatus.Name,
|
||||
ProvingStatus: int16(types.RollerAssigned),
|
||||
FailureType: int16(types.RollerFailureTypeUndefined),
|
||||
CreatedAt: time.Now(), // Used in proverTasks, should be explicitly assigned here.
|
||||
}
|
||||
|
||||
// Store session info.
|
||||
if err = ac.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
|
||||
return fmt.Errorf("db set session info fail, session id:%s, error:%w", proverTask.TaskID, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return transErr
|
||||
}
|
||||
|
||||
// sendTask fetches the already-proved chunk proofs belonging to the batch
// and dispatches the batch proving task via the shared BaseCollector
// (sub-proofs only; batch tasks carry no block hashes).
func (ac *BatchProofCollector) sendTask(ctx context.Context, taskID string) ([]*coordinatorType.RollerStatus, error) {
	// get chunk proofs from db
	chunkProofs, err := ac.chunkOrm.GetProofsByBatchHash(ctx, taskID)
	if err != nil {
		err = fmt.Errorf("failed to get chunk proofs for batch task id:%s err:%w ", taskID, err)
		return nil, err
	}
	return ac.BaseCollector.sendTask(message.ProofTypeBatch, taskID, nil, chunkProofs)
}
|
||||
118
coordinator/internal/logic/collector/chunk_proof_collector.go
Normal file
118
coordinator/internal/logic/collector/chunk_proof_collector.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/rollermanager"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
// ChunkProofCollector the chunk proof collector
type ChunkProofCollector struct {
	db *gorm.DB // used to open the assignment transaction in Collect

	BaseCollector // shared ORM handles, config and task-dispatch helpers
}
|
||||
|
||||
// NewChunkProofCollector new a chunk proof collector
|
||||
func NewChunkProofCollector(cfg *config.Config, db *gorm.DB) *ChunkProofCollector {
|
||||
cp := &ChunkProofCollector{
|
||||
db: db,
|
||||
BaseCollector: BaseCollector{
|
||||
cfg: cfg,
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
blockOrm: orm.NewL2Block(db),
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
},
|
||||
}
|
||||
return cp
|
||||
}
|
||||
|
||||
// Name return a block batch collector name
|
||||
func (cp *ChunkProofCollector) Name() string {
|
||||
return ChunkCollectorName
|
||||
}
|
||||
|
||||
// Collect the chunk proof which need to prove: picks at most one unassigned
// chunk, checks the attempt budget and idle rollers, dispatches the task and
// records the assignment atomically.
func (cp *ChunkProofCollector) Collect(ctx context.Context) error {
	// load and send chunk tasks
	chunkTasks, err := cp.chunkOrm.GetUnassignedChunks(ctx, 1)
	if err != nil {
		return fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
	}

	if len(chunkTasks) == 0 {
		return nil
	}

	if len(chunkTasks) != 1 {
		return fmt.Errorf("get unassigned chunk proving task len not 1")
	}

	chunkTask := chunkTasks[0]

	log.Info("start chunk generation session", "id", chunkTask.Hash)

	// checkAttemptsExceeded returns true while the task may still be attempted.
	if !cp.checkAttemptsExceeded(chunkTask.Hash) {
		return fmt.Errorf("the session id:%s check attempts have reach the maximum", chunkTask.Hash)
	}

	if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
		return fmt.Errorf("no idle chunk roller when starting proof generation session, id:%s", chunkTask.Hash)
	}

	rollerStatusList, err := cp.sendTask(ctx, chunkTask.Hash)
	if err != nil {
		return fmt.Errorf("send task failure, id:%s error:%w", chunkTask.Hash, err)
	}

	transErr := cp.db.Transaction(func(tx *gorm.DB) error {
		// Update session proving status as assigned.
		if err = cp.chunkOrm.UpdateProvingStatus(ctx, chunkTask.Hash, types.ProvingTaskAssigned, tx); err != nil {
			log.Error("failed to update task status", "id", chunkTask.Hash, "err", err)
			return err
		}

		// One prover-task row per roller the task was sent to.
		for _, rollerStatus := range rollerStatusList {
			proverTask := orm.ProverTask{
				TaskID:          chunkTask.Hash,
				ProverPublicKey: rollerStatus.PublicKey,
				TaskType:        int16(message.ProofTypeChunk),
				ProverName:      rollerStatus.Name,
				ProvingStatus:   int16(types.RollerAssigned),
				FailureType:     int16(types.RollerFailureTypeUndefined),
				CreatedAt:       time.Now(), // Used in proverTasks, should be explicitly assigned here.
			}
			if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask, tx); err != nil {
				return fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, rollerStatus.PublicKey, err)
			}
		}
		return nil
	})
	return transErr
}
|
||||
|
||||
// sendTask loads the chunk's L2 blocks, derives their header hashes, and
// dispatches the chunk proving task via the shared BaseCollector
// (block hashes only; chunk tasks carry no sub-proofs).
func (cp *ChunkProofCollector) sendTask(ctx context.Context, hash string) ([]*coordinatorType.RollerStatus, error) {
	// Get block hashes.
	wrappedBlocks, err := cp.blockOrm.GetL2BlocksByChunkHash(ctx, hash)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch wrapped blocks, batch hash:%s err:%w", hash, err)
	}
	blockHashes := make([]common.Hash, len(wrappedBlocks))
	for i, wrappedBlock := range wrappedBlocks {
		blockHashes[i] = wrappedBlock.Header.Hash()
	}

	return cp.BaseCollector.sendTask(message.ProofTypeChunk, hash, blockHashes, nil)
}
|
||||
131
coordinator/internal/logic/collector/collector.go
Normal file
131
coordinator/internal/logic/collector/collector.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/rollermanager"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
// Names identifying the registered collectors (see Collector.Name).
const (
	// BatchCollectorName the name of batch collector
	BatchCollectorName = "batch_collector"
	// ChunkCollectorName the name of chunk collector
	ChunkCollectorName = "chunk_collector"
)
|
||||
|
||||
// coordinatorSessionsTimeoutTotalCounter counts sessions abandoned after
// exhausting every allowed attempt (see checkAttemptsExceeded).
var coordinatorSessionsTimeoutTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)
|
||||
|
||||
// Collector the interface of a collector who send data to prover
type Collector interface {
	// Name returns a stable identifier for the collector, used in logs.
	Name() string
	// Collect looks for pending work and dispatches it to idle rollers.
	Collect(ctx context.Context) error
}
|
||||
|
||||
// HashTaskPublicKey hash public key pair
// NOTE(review): not referenced anywhere in this file — confirm it is used
// elsewhere before relying on its semantics.
type HashTaskPublicKey struct {
	Attempt int
	PubKey  string
}
|
||||
|
||||
// BaseCollector a base collector which contain series functions shared by
// the batch and chunk collectors (attempt accounting and task dispatch).
type BaseCollector struct {
	cfg *config.Config
	// ctx is used by checkAttemptsExceeded's status updates.
	// NOTE(review): never assigned in the constructors visible in this diff —
	// confirm it is set elsewhere, otherwise those calls receive a nil context.
	ctx context.Context

	batchOrm      *orm.Batch
	chunkOrm      *orm.Chunk
	blockOrm      *orm.L2Block
	proverTaskOrm *orm.ProverTask
}
|
||||
|
||||
// checkAttempts use the count of prover task info to check the attempts
|
||||
func (b *BaseCollector) checkAttemptsExceeded(hash string) bool {
|
||||
whereFields := make(map[string]interface{})
|
||||
whereFields["hash"] = hash
|
||||
proverTasks, err := b.proverTaskOrm.GetProverTasks(whereFields, nil, 0)
|
||||
if err != nil {
|
||||
log.Error("get session info error", "hash id", hash, "error", err)
|
||||
return true
|
||||
}
|
||||
|
||||
if len(proverTasks) >= b.cfg.SessionAttempts {
|
||||
log.Warn("proof generation prover task %s ended because reach the max attempts", hash)
|
||||
|
||||
var isAllFailed bool
|
||||
for _, proverTask := range proverTasks {
|
||||
if types.ProvingStatus(proverTask.ProvingStatus) != types.ProvingTaskFailed {
|
||||
isAllFailed = false
|
||||
}
|
||||
|
||||
if types.ProvingStatus(proverTask.ProvingStatus) == types.ProvingTaskFailed {
|
||||
rollermanager.Manager.FreeTaskIDForRoller(proverTask.ProverPublicKey, hash)
|
||||
}
|
||||
}
|
||||
|
||||
if isAllFailed {
|
||||
// Set status as skipped.
|
||||
// Note that this is only a workaround for testnet here.
|
||||
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
|
||||
// so as to re-distribute the task in the future
|
||||
|
||||
if message.ProofType(proverTasks[0].TaskType) == message.ProofTypeChunk {
|
||||
if err := b.chunkOrm.UpdateProvingStatus(b.ctx, hash, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
|
||||
}
|
||||
}
|
||||
if message.ProofType(proverTasks[0].TaskType) == message.ProofTypeBatch {
|
||||
if err := b.batchOrm.UpdateProvingStatus(b.ctx, hash, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("failed to update batch proving_status as failed", "msg.ID", hash, "error", err)
|
||||
}
|
||||
}
|
||||
coordinatorSessionsTimeoutTotalCounter.Inc(1)
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// sendTask broadcasts a proving task (hash plus either block hashes or
// sub-proofs, depending on proveType) to up to RollersPerSession rollers and
// returns a status entry for every roller that accepted it.
func (b *BaseCollector) sendTask(proveType message.ProofType, hash string, blockHashes []common.Hash, subProofs []*message.AggProof) ([]*coordinatorType.RollerStatus, error) {
	sendMsg := &message.TaskMsg{
		ID:          hash,
		Type:        proveType,
		BlockHashes: blockHashes,
		SubProofs:   subProofs,
	}

	var err error
	var rollerStatusList []*coordinatorType.RollerStatus
	for i := uint8(0); i < b.cfg.RollersPerSession; i++ {
		rollerPubKey, rollerName, sendErr := rollermanager.Manager.SendTask(proveType, sendMsg)
		if sendErr != nil {
			// remember the failure but keep trying the remaining slots
			err = sendErr
			continue
		}

		rollermanager.Manager.UpdateMetricRollerProofsLastAssignedTimestampGauge(rollerPubKey)

		rollerStatus := &coordinatorType.RollerStatus{
			PublicKey: rollerPubKey,
			Name:      rollerName,
			Status:    types.RollerAssigned,
		}
		rollerStatusList = append(rollerStatusList, rollerStatus)
	}

	// NOTE(review): if ANY send failed the whole list is discarded even though
	// some rollers may already hold the task — confirm this is intended.
	if err != nil {
		return nil, err
	}
	return rollerStatusList, nil
}
|
||||
289
coordinator/internal/logic/proof/proof_receiver.go
Normal file
289
coordinator/internal/logic/proof/proof_receiver.go
Normal file
@@ -0,0 +1,289 @@
|
||||
package proof
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/logic/rollermanager"
|
||||
"scroll-tech/coordinator/internal/logic/verifier"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
types2 "scroll-tech/coordinator/internal/types"
|
||||
)
|
||||
|
||||
// geth metrics tracking proof receipt and verification outcomes.
var (
	coordinatorProofsGeneratedFailedTimeTimer = gethMetrics.NewRegisteredTimer("coordinator/proofs/generated/failed/time", metrics.ScrollRegistry)
	coordinatorProofsReceivedTotalCounter     = gethMetrics.NewRegisteredCounter("coordinator/proofs/received/total", metrics.ScrollRegistry)
	coordinatorProofsVerifiedSuccessTimeTimer = gethMetrics.NewRegisteredTimer("coordinator/proofs/verified/success/time", metrics.ScrollRegistry)
	coordinatorProofsVerifiedFailedTimeTimer  = gethMetrics.NewRegisteredTimer("coordinator/proofs/verified/failed/time", metrics.ScrollRegistry)
	coordinatorSessionsFailedTotalCounter     = gethMetrics.NewRegisteredCounter("coordinator/sessions/failed/total", metrics.ScrollRegistry)
)
|
||||
|
||||
// Sentinel errors returned by the proof validator.
var (
	// ErrValidatorFailureProofMsgStatusNotOk proof msg status not ok
	ErrValidatorFailureProofMsgStatusNotOk = errors.New("validator failure proof msg status not ok")
	// ErrValidatorFailureRollerEmpty get none rollers
	ErrValidatorFailureRollerEmpty = errors.New("validator failure get none rollers for the proof")
	// ErrValidatorFailureRollerInfoHasProofValid proof is valid
	ErrValidatorFailureRollerInfoHasProofValid = errors.New("validator failure roller info has proof valid")
)
|
||||
|
||||
// ZKProofReceiver the proof receiver: validates, stores and verifies zk
// proofs submitted by rollers, then updates task and prover-task status.
type ZKProofReceiver struct {
	chunkOrm      *orm.Chunk
	batchOrm      *orm.Batch
	proverTaskOrm *orm.ProverTask

	db  *gorm.DB
	cfg *config.Config

	// verifier checks submitted proofs before tasks are closed.
	verifier *verifier.Verifier
}
|
||||
|
||||
// NewZKProofReceiver create a proof receiver
|
||||
func NewZKProofReceiver(cfg *config.Config, db *gorm.DB) *ZKProofReceiver {
|
||||
vf, err := verifier.NewVerifier(cfg.Verifier)
|
||||
if err != nil {
|
||||
panic("proof receiver new verifier failure")
|
||||
}
|
||||
return &ZKProofReceiver{
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
batchOrm: orm.NewBatch(db),
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
|
||||
cfg: cfg,
|
||||
db: db,
|
||||
|
||||
verifier: vf,
|
||||
}
|
||||
}
|
||||
|
||||
// HandleZkProof handle a ZkProof submitted from a roller.
|
||||
// For now only proving/verifying error will lead to setting status as skipped.
|
||||
// db/unmarshal errors will not because they are errors on the business logic side.
|
||||
func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.ProofMsg) error {
|
||||
pk, _ := proofMsg.PublicKey()
|
||||
proverTask, err := m.proverTaskOrm.GetProverTaskByHashAndPubKey(ctx, proofMsg.ID, pk)
|
||||
if proverTask == nil || err != nil {
|
||||
log.Error("get none rollers for the proof key", pk, "id", proofMsg.ID)
|
||||
return ErrValidatorFailureRollerEmpty
|
||||
}
|
||||
|
||||
if err = m.validator(proverTask, pk, proofMsg); err != nil {
|
||||
if errors.Is(err, ErrValidatorFailureProofMsgStatusNotOk) {
|
||||
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
proofTime := time.Since(proverTask.CreatedAt)
|
||||
proofTimeSec := uint64(proofTime.Seconds())
|
||||
|
||||
// store proof content
|
||||
var storeProofErr error
|
||||
switch proofMsg.Type {
|
||||
case message.ProofTypeChunk:
|
||||
storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
|
||||
if dbErr := m.chunkOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.Proof, proofTimeSec, tx); dbErr != nil {
|
||||
return fmt.Errorf("failed to store chunk proof into db, err:%w", dbErr)
|
||||
}
|
||||
if dbErr := m.chunkOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskProved, tx); dbErr != nil {
|
||||
return fmt.Errorf("failed to update chunk task status as proved, error:%w", dbErr)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
case message.ProofTypeBatch:
|
||||
storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
|
||||
if dbErr := m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.Proof, proofTimeSec, tx); dbErr != nil {
|
||||
return fmt.Errorf("failed to store batch proof into db, error:%w", dbErr)
|
||||
}
|
||||
if dbErr := m.batchOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskProved, tx); dbErr != nil {
|
||||
return fmt.Errorf("failed to update batch task status as proved, error:%w", dbErr)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if storeProofErr != nil {
|
||||
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
|
||||
log.Error("failed to store basic proof into db", "error", storeProofErr)
|
||||
return storeProofErr
|
||||
}
|
||||
|
||||
coordinatorProofsReceivedTotalCounter.Inc(1)
|
||||
|
||||
// TODO: wrap both basic verifier and aggregator verifier
|
||||
success, verifyErr := m.verifier.VerifyProof(proofMsg.Proof)
|
||||
if verifyErr != nil || !success {
|
||||
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
|
||||
|
||||
// TODO: this is only a temp workaround for testnet, we should return err in real cases
|
||||
log.Error("Failed to verify zk proof", "proof id", proofMsg.ID, "roller pk", pk, "prove type",
|
||||
proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
|
||||
// TODO: Roller needs to be slashed if proof is invalid.
|
||||
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
|
||||
|
||||
rollermanager.Manager.UpdateMetricRollerProofsVerifiedFailedTimeTimer(pk, proofTime)
|
||||
|
||||
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "roller name", "roller pk",
|
||||
pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg); err != nil {
|
||||
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg.Type)
|
||||
}
|
||||
|
||||
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
|
||||
rollermanager.Manager.UpdateMetricRollerProofsVerifiedSuccessTimeTimer(pk, proofTime)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validator sanity-checks an incoming proof message against its recorded
// prover task and updates the roller's "last finished" metric. It returns
// ErrValidatorFailureProofMsgStatusNotOk when the roller reported a failed
// proof generation; nil otherwise.
func (m *ZKProofReceiver) validator(proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
	pubKey, _ := proofMsg.PublicKey()
	rollermanager.Manager.UpdateMetricRollerProofsLastFinishedTimestampGauge(pubKey)

	proofTime := time.Since(proverTask.CreatedAt)
	proofTimeSec := uint64(proofTime.Seconds())

	log.Info("handling zk proof", "proof id", proofMsg.ID, "roller name", proverTask.ProverName,
		"roller pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)

	if proofMsg.Status != message.StatusOk {
		coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)

		rollermanager.Manager.UpdateMetricRollerProofsGeneratedFailedTimeTimer(pk, proofTime)

		log.Info("proof generated by roller failed", "proof id", proofMsg.ID, "roller name", proverTask.ProverName,
			"roller pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", proofMsg.Error)
		return ErrValidatorFailureProofMsgStatusNotOk
	}
	return nil
}
|
||||
|
||||
// proofFailure marks the task identified by (hash, pubKey) as failed and
// bumps the failed-sessions counter. Update errors are logged, not returned.
func (m *ZKProofReceiver) proofFailure(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
	if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskFailed); err != nil {
		log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
	}
	coordinatorSessionsFailedTotalCounter.Inc(1)
}
|
||||
|
||||
// proofRecover resets the task identified by (hash, pubKey) to unassigned so
// it can be re-distributed. Update errors are logged, not returned.
func (m *ZKProofReceiver) proofRecover(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
	if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskUnassigned); err != nil {
		log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
	}
}
|
||||
|
||||
// closeProofTask marks the task as verified and releases the task slot held
// by the roller. A non-nil return means the status update failed and the
// caller should recover the task.
func (m *ZKProofReceiver) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) error {
	if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg.Type, types.ProvingTaskVerified); err != nil {
		log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "pubKey", pubKey, "error", err)
		return err
	}

	rollermanager.Manager.FreeTaskIDForRoller(pubKey, hash)
	return nil
}
|
||||
|
||||
// UpdateProofStatus update the block batch/agg task and session info status
// inside one transaction. It is skipped entirely when the prover task has
// already been marked as timed out (a late proof result must not overwrite
// the timeout verdict).
func (m *ZKProofReceiver) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsgType message.ProofType, status types.ProvingStatus) error {
	// if the prover task failure type is SessionInfoFailureTimeout,
	// just skip update the status because the proof result come so slow.
	if m.checkIsTimeoutFailure(ctx, hash, proverPublicKey) {
		return nil
	}

	err := m.db.Transaction(func(tx *gorm.DB) error {
		// if the block batch has proof verified, so the failed status not update block batch proving status
		if status == types.ProvingTaskFailed && !m.checkIsTaskSuccess(ctx, hash, proofMsgType) {
			switch proofMsgType {
			case message.ProofTypeChunk:
				if err := m.chunkOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
					log.Error("failed to update basic proving_status as failed", "msg.ID", hash, "error", err)
					return err
				}
			case message.ProofTypeBatch:
				if err := m.batchOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
					log.Error("failed to update aggregator proving_status as failed", "msg.ID", hash, "error", err)
					return err
				}
			}
		}

		// non-failure statuses are always written through
		if status != types.ProvingTaskFailed {
			switch proofMsgType {
			case message.ProofTypeChunk:
				if err := m.chunkOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
					log.Error("failed to update basic proving_status as failed", "msg.ID", hash, "error", err)
					return err
				}
			case message.ProofTypeBatch:
				if err := m.batchOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
					log.Error("failed to update aggregator proving_status as failed", "msg.ID", hash, "error", err)
					return err
				}
			}
		}

		// Map the chunk/batch status onto the per-roller task status.
		// NOTE(review): ProvingTaskProved mapping to RollerProofInvalid looks
		// suspicious — confirm it is intentional and not a copy/paste slip.
		var proverTaskStatus types.RollerProveStatus
		switch status {
		case types.ProvingTaskProved:
			proverTaskStatus = types.RollerProofInvalid
		case types.ProvingTaskUnassigned:
			proverTaskStatus = types.RollerProveStatusUndefined
		case types.ProvingTaskVerified:
			proverTaskStatus = types.RollerProofValid
		}

		// NOTE(review): this update does not receive tx, so it runs outside
		// the surrounding transaction — confirm whether that is intended.
		if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsgType, hash, proverPublicKey, proverTaskStatus); updateErr != nil {
			return updateErr
		}
		return nil
	})

	return err
}
|
||||
|
||||
func (m *ZKProofReceiver) checkIsTaskSuccess(ctx context.Context, hash string, proofType message.ProofType) bool {
|
||||
switch proofType {
|
||||
case message.ProofTypeChunk:
|
||||
provingStatus, err := m.chunkOrm.GetProvingStatusByHash(ctx, hash)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if provingStatus == types.ProvingTaskVerified {
|
||||
return true
|
||||
}
|
||||
case message.ProofTypeBatch:
|
||||
provingStatus, err := m.batchOrm.GetProvingStatusByHash(ctx, hash)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if provingStatus == types.ProvingTaskVerified {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *ZKProofReceiver) checkIsTimeoutFailure(ctx context.Context, hash, proverPublicKey string) bool {
|
||||
proverTask, err := m.proverTaskOrm.GetProverTaskByHashAndPubKey(ctx, hash, proverPublicKey)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if types2.ProverTaskFailureType(proverTask.FailureType) == types2.ProverTaskFailureTypeTimeout {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
73
coordinator/internal/logic/proof/task_worker.go
Normal file
73
coordinator/internal/logic/proof/task_worker.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package proof
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/coordinator/internal/logic/rollermanager"
|
||||
)
|
||||
|
||||
// coordinatorRollersDisconnectsTotalCounter counts roller websocket
// subscriptions that ended with an error (see TaskWorker.worker).
var coordinatorRollersDisconnectsTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/rollers/disconnects/total", metrics.ScrollRegistry)
|
||||
|
||||
// TaskWorker held the roller task connection. It is stateless; per-roller
// state lives in the rollermanager package.
type TaskWorker struct{}
|
||||
|
||||
// NewTaskWorker create a task worker.
func NewTaskWorker() *TaskWorker {
	return &TaskWorker{}
}
|
||||
|
||||
// AllocTaskWorker alloc a task worker goroutine: it registers the roller
// identified by authMsg with the roller manager, creates an RPC subscription
// over which tasks will be pushed, and spawns a worker goroutine that
// forwards task messages for the subscription's lifetime.
func (t *TaskWorker) AllocTaskWorker(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
	notifier, supported := rpc.NotifierFromContext(ctx)
	if !supported {
		return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
	}

	// NOTE(review): the PublicKey error is discarded — confirm authMsg was
	// verified upstream before this point.
	pubKey, _ := authMsg.PublicKey()
	identity := authMsg.Identity

	// create or get the roller message channel
	taskCh, err := rollermanager.Manager.Register(pubKey, identity)
	if err != nil {
		return nil, err
	}

	rpcSub := notifier.CreateSubscription()

	go t.worker(rpcSub, notifier, pubKey, identity, taskCh)

	log.Info("roller register", "name", identity.Name, "pubKey", pubKey, "version", identity.Version)

	return rpcSub, nil
}
|
||||
|
||||
func (t *TaskWorker) worker(rpcSub *rpc.Subscription, notifier *rpc.Notifier, pubKey string, identity *message.Identity, taskCh <-chan *message.TaskMsg) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
log.Error("task worker subId:%d panic for:%v", err)
|
||||
}
|
||||
|
||||
rollermanager.Manager.FreeRoller(pubKey)
|
||||
log.Info("roller unregister", "name", identity.Name, "pubKey", pubKey)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case task := <-taskCh:
|
||||
notifier.Notify(rpcSub.ID, task) //nolint
|
||||
case err := <-rpcSub.Err():
|
||||
coordinatorRollersDisconnectsTotalCounter.Inc(1)
|
||||
log.Warn("client stopped the ws connection", "name", identity.Name, "pubkey", pubKey, "err", err)
|
||||
return
|
||||
case <-notifier.Closed():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
60
coordinator/internal/logic/rollermanager/metrics.go
Normal file
60
coordinator/internal/logic/rollermanager/metrics.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package rollermanager
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
)
|
||||
|
||||
type rollerMetrics struct {
|
||||
rollerProofsVerifiedSuccessTimeTimer gethMetrics.Timer
|
||||
rollerProofsVerifiedFailedTimeTimer gethMetrics.Timer
|
||||
rollerProofsGeneratedFailedTimeTimer gethMetrics.Timer
|
||||
rollerProofsLastAssignedTimestampGauge gethMetrics.Gauge
|
||||
rollerProofsLastFinishedTimestampGauge gethMetrics.Gauge
|
||||
}
|
||||
|
||||
func (r *rollerManager) UpdateMetricRollerProofsLastFinishedTimestampGauge(pk string) {
|
||||
if node, ok := r.rollerPool.Get(pk); ok {
|
||||
rMs := node.(*rollerNode).rollerMetrics
|
||||
if rMs != nil {
|
||||
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rollerManager) UpdateMetricRollerProofsLastAssignedTimestampGauge(pk string) {
|
||||
if node, ok := r.rollerPool.Get(pk); ok {
|
||||
rMs := node.(*rollerNode).rollerMetrics
|
||||
if rMs != nil {
|
||||
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rollerManager) UpdateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
|
||||
if node, ok := r.rollerPool.Get(pk); ok {
|
||||
rMs := node.(*rollerNode).rollerMetrics
|
||||
if rMs != nil {
|
||||
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rollerManager) UpdateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
|
||||
if node, ok := r.rollerPool.Get(pk); ok {
|
||||
rMs := node.(*rollerNode).rollerMetrics
|
||||
if rMs != nil {
|
||||
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *rollerManager) UpdateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
|
||||
if node, ok := r.rollerPool.Get(pk); ok {
|
||||
rMs := node.(*rollerNode).rollerMetrics
|
||||
if rMs != nil {
|
||||
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
161
coordinator/internal/logic/rollermanager/roller_manager.go
Normal file
161
coordinator/internal/logic/rollermanager/roller_manager.go
Normal file
@@ -0,0 +1,161 @@
|
||||
package rollermanager
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
cmap "github.com/orcaman/concurrent-map"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types/message"
|
||||
)
|
||||
|
||||
var (
|
||||
once sync.Once
|
||||
// Manager the global roller manager
|
||||
Manager *rollerManager
|
||||
)
|
||||
|
||||
// RollerNode the interface for controller how to use roller.
|
||||
type rollerNode struct {
|
||||
// Roller name
|
||||
Name string
|
||||
// Roller type
|
||||
Type message.ProofType
|
||||
// Roller public key
|
||||
PublicKey string
|
||||
// Roller version
|
||||
Version string
|
||||
|
||||
// task channel
|
||||
taskChan chan *message.TaskMsg
|
||||
// session id list which delivered to roller.
|
||||
TaskIDs cmap.ConcurrentMap
|
||||
|
||||
// Time of message creation
|
||||
registerTime time.Time
|
||||
|
||||
*rollerMetrics
|
||||
}
|
||||
|
||||
type rollerManager struct {
|
||||
rollerPool cmap.ConcurrentMap
|
||||
}
|
||||
|
||||
// InitRollerManager init a roller manager
|
||||
func InitRollerManager() {
|
||||
once.Do(func() {
|
||||
Manager = &rollerManager{
|
||||
rollerPool: cmap.New(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Register the identity message to roller manager with the public key
|
||||
func (r *rollerManager) Register(pubkey string, identity *message.Identity) (<-chan *message.TaskMsg, error) {
|
||||
node, ok := r.rollerPool.Get(pubkey)
|
||||
if !ok {
|
||||
rMs := &rollerMetrics{
|
||||
rollerProofsVerifiedSuccessTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/success/time/%s", pubkey), metrics.ScrollRegistry),
|
||||
rollerProofsVerifiedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/failed/time/%s", pubkey), metrics.ScrollRegistry),
|
||||
rollerProofsGeneratedFailedTimeTimer: gethMetrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/generated/failed/time/%s", pubkey), metrics.ScrollRegistry),
|
||||
rollerProofsLastAssignedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/assigned/timestamp/%s", pubkey), metrics.ScrollRegistry),
|
||||
rollerProofsLastFinishedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", pubkey), metrics.ScrollRegistry),
|
||||
}
|
||||
node = &rollerNode{
|
||||
Name: identity.Name,
|
||||
Type: identity.RollerType,
|
||||
Version: identity.Version,
|
||||
PublicKey: pubkey,
|
||||
TaskIDs: cmap.New(),
|
||||
taskChan: make(chan *message.TaskMsg, 4),
|
||||
rollerMetrics: rMs,
|
||||
}
|
||||
r.rollerPool.Set(pubkey, node)
|
||||
}
|
||||
roller := node.(*rollerNode)
|
||||
// avoid reconnection too frequently.
|
||||
if time.Since(roller.registerTime) < 60 {
|
||||
log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "roller_type", identity.RollerType, "public key", pubkey)
|
||||
return nil, fmt.Errorf("roller reconnect too frequently")
|
||||
}
|
||||
// update register time and status
|
||||
roller.registerTime = time.Now()
|
||||
|
||||
return roller.taskChan, nil
|
||||
}
|
||||
|
||||
// SendTask send the need proved message to roller
|
||||
func (r *rollerManager) SendTask(rollerType message.ProofType, msg *message.TaskMsg) (string, string, error) {
|
||||
tmpRoller := r.selectRoller(rollerType)
|
||||
if tmpRoller == nil {
|
||||
return "", "", errors.New("selectRoller returns nil")
|
||||
}
|
||||
|
||||
select {
|
||||
case tmpRoller.taskChan <- msg:
|
||||
tmpRoller.TaskIDs.Set(msg.ID, struct{}{})
|
||||
default:
|
||||
err := fmt.Errorf("roller channel is full, rollerName:%s, publicKey:%s", tmpRoller.Name, tmpRoller.PublicKey)
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
r.UpdateMetricRollerProofsLastAssignedTimestampGauge(tmpRoller.PublicKey)
|
||||
|
||||
return tmpRoller.PublicKey, tmpRoller.Name, nil
|
||||
}
|
||||
|
||||
// ExistTaskIDForRoller check the task exist
|
||||
func (r *rollerManager) ExistTaskIDForRoller(pk string, id string) bool {
|
||||
node, ok := r.rollerPool.Get(pk)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
roller := node.(*rollerNode)
|
||||
return roller.TaskIDs.Has(id)
|
||||
}
|
||||
|
||||
// FreeRoller free the roller with the pk key
|
||||
func (r *rollerManager) FreeRoller(pk string) {
|
||||
r.rollerPool.Pop(pk)
|
||||
}
|
||||
|
||||
// FreeTaskIDForRoller free a task of the pk roller
|
||||
func (r *rollerManager) FreeTaskIDForRoller(pk string, id string) {
|
||||
if node, ok := r.rollerPool.Get(pk); ok {
|
||||
roller := node.(*rollerNode)
|
||||
roller.TaskIDs.Pop(id)
|
||||
}
|
||||
}
|
||||
|
||||
// GetNumberOfIdleRollers return the count of idle rollers.
|
||||
func (r *rollerManager) GetNumberOfIdleRollers(rollerType message.ProofType) (count int) {
|
||||
for item := range r.rollerPool.IterBuffered() {
|
||||
roller := item.Val.(*rollerNode)
|
||||
if roller.TaskIDs.Count() == 0 && roller.Type == rollerType {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func (r *rollerManager) selectRoller(rollerType message.ProofType) *rollerNode {
|
||||
pubkeys := r.rollerPool.Keys()
|
||||
for len(pubkeys) > 0 {
|
||||
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
|
||||
if val, ok := r.rollerPool.Get(pubkeys[idx.Int64()]); ok {
|
||||
rn := val.(*rollerNode)
|
||||
if rn.TaskIDs.Count() == 0 && rn.Type == rollerType {
|
||||
return rn
|
||||
}
|
||||
}
|
||||
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
7
coordinator/internal/logic/verifier/assets/agg_proof
Normal file
7
coordinator/internal/logic/verifier/assets/agg_proof
Normal file
File diff suppressed because one or more lines are too long
BIN
coordinator/internal/logic/verifier/assets/agg_vk
Normal file
BIN
coordinator/internal/logic/verifier/assets/agg_vk
Normal file
Binary file not shown.
@@ -11,8 +11,7 @@ import (
|
||||
const InvalidTestProof = "this is a invalid proof"
|
||||
|
||||
// Verifier represents a mock halo2 verifier.
|
||||
type Verifier struct {
|
||||
}
|
||||
type Verifier struct{}
|
||||
|
||||
// NewVerifier Sets up a mock verifier.
|
||||
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
|
||||
@@ -1,6 +1,6 @@
|
||||
//go:build ffi
|
||||
|
||||
package verifier_test
|
||||
package verifier
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
@@ -11,16 +11,27 @@ import (
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
<<<<<<< HEAD:coordinator/internal/logic/verifier/verifier_test.go
|
||||
"scroll-tech/coordinator/config"
|
||||
=======
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/verifier"
|
||||
>>>>>>> 6841ef264c163c158446d94d8ea48336aca8498e:coordinator/verifier/verifier_test.go
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
<<<<<<< HEAD:coordinator/internal/logic/verifier/verifier_test.go
|
||||
const (
|
||||
paramsPath = "./assets/test_params"
|
||||
aggVkPath = "./assets/agg_vk"
|
||||
proofPath = "./assets/agg_proof"
|
||||
=======
|
||||
var (
|
||||
paramsPath = flag.String("params", "/assets/test_params", "params dir")
|
||||
aggVkPath = flag.String("vk", "/assets/agg_vk", "aggregation proof verification key path")
|
||||
proofPath = flag.String("proof", "/assets/agg_proof", "aggregation proof path")
|
||||
>>>>>>> 6841ef264c163c158446d94d8ea48336aca8498e:coordinator/verifier/verifier_test.go
|
||||
)
|
||||
|
||||
func TestFFI(t *testing.T) {
|
||||
@@ -30,7 +41,7 @@ func TestFFI(t *testing.T) {
|
||||
ParamsPath: *paramsPath,
|
||||
AggVkPath: *aggVkPath,
|
||||
}
|
||||
v, err := verifier.NewVerifier(cfg)
|
||||
v, err := NewVerifier(cfg)
|
||||
as.NoError(err)
|
||||
|
||||
f, err := os.Open(*proofPath)
|
||||
@@ -215,7 +215,11 @@ func (o *Chunk) UpdateChunkProofsStatusByBatchHash(ctx context.Context, batchHas
|
||||
}
|
||||
|
||||
// UpdateProvingStatus updates the proving status of a batch.
|
||||
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus) error {
|
||||
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
updateFields := make(map[string]interface{})
|
||||
updateFields["proving_status"] = int(status)
|
||||
|
||||
@@ -228,7 +232,7 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
|
||||
updateFields["proved_at"] = time.Now()
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&Batch{})
|
||||
db = db.Where("hash", hash)
|
||||
|
||||
@@ -239,7 +243,11 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
|
||||
}
|
||||
|
||||
// UpdateProofByHash updates the batch proof by hash.
|
||||
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
|
||||
func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
proofBytes, err := json.Marshal(proof)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -249,7 +257,7 @@ func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *messa
|
||||
updateFields["proof"] = proofBytes
|
||||
updateFields["proof_time_sec"] = proofTimeSec
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&Batch{})
|
||||
db = db.Where("hash", hash)
|
||||
|
||||
|
||||
@@ -245,7 +245,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk) (*Chunk, er
|
||||
}
|
||||
|
||||
// UpdateProvingStatus updates the proving status of a chunk.
|
||||
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus) error {
|
||||
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
|
||||
updateFields := make(map[string]interface{})
|
||||
updateFields["proving_status"] = int(status)
|
||||
|
||||
@@ -257,8 +257,11 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
|
||||
case types.ProvingTaskProved, types.ProvingTaskVerified:
|
||||
updateFields["proved_at"] = time.Now()
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("hash", hash)
|
||||
|
||||
@@ -269,7 +272,11 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
|
||||
}
|
||||
|
||||
// UpdateProofByHash updates the chunk proof by hash.
|
||||
func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
|
||||
func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
|
||||
db := o.db
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
proofBytes, err := json.Marshal(proof)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -279,7 +286,7 @@ func (o *Chunk) UpdateProofByHash(ctx context.Context, hash string, proof *messa
|
||||
updateFields["proof"] = proofBytes
|
||||
updateFields["proof_time_sec"] = proofTimeSec
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("hash", hash)
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package orm
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
coordinatorType "scroll-tech/coordinator/internal/types"
|
||||
"time"
|
||||
|
||||
"github.com/shopspring/decimal"
|
||||
@@ -49,6 +50,28 @@ func (*ProverTask) TableName() string {
|
||||
return "prover_task"
|
||||
}
|
||||
|
||||
// GetProverTasks get prover tasks
|
||||
func (o *ProverTask) GetProverTasks(fields map[string]interface{}, orderByList []string, limit int) ([]ProverTask, error) {
|
||||
var proverTasks []ProverTask
|
||||
db := o.db
|
||||
for k, v := range fields {
|
||||
db = db.Where(k, v)
|
||||
}
|
||||
|
||||
for _, orderBy := range orderByList {
|
||||
db = db.Order(orderBy)
|
||||
}
|
||||
|
||||
if limit != 0 {
|
||||
db = db.Limit(limit)
|
||||
}
|
||||
|
||||
if err := db.Find(&proverTasks).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proverTasks, nil
|
||||
}
|
||||
|
||||
// GetProverTasksByHashes retrieves the ProverTask records associated with the specified hashes.
|
||||
// The returned prover task objects are sorted in ascending order by their ids.
|
||||
func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string) ([]*ProverTask, error) {
|
||||
@@ -68,9 +91,42 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, hashes []string
|
||||
return proverTasks, nil
|
||||
}
|
||||
|
||||
// SetProverTask updates or inserts a ProverTask record.
|
||||
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask) error {
|
||||
// GetProverTaskByHashAndPubKey get prover task hash and public key
|
||||
func (o *ProverTask) GetProverTaskByHashAndPubKey(ctx context.Context, hash, proverPublicKey string) (*ProverTask, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&ProverTask{})
|
||||
db = db.Where("hash", hash).Where("roller_public_key", proverPublicKey)
|
||||
|
||||
var proverTask ProverTask
|
||||
err := db.First(&proverTask).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ProverTask.GetProverTaskByHashAndPubKey err:%w, hash:%s, pubukey:%s", err, hash, proverPublicKey)
|
||||
}
|
||||
return &proverTask, nil
|
||||
}
|
||||
|
||||
// GetAssignedProverTasks get the unassigned prover task
|
||||
func (o *ProverTask) GetAssignedProverTasks(ctx context.Context, limit int) ([]ProverTask, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&ProverTask{})
|
||||
db = db.Where("proving_status", int(types.RollerAssigned))
|
||||
db = db.Limit(limit)
|
||||
|
||||
var proverTasks []ProverTask
|
||||
err := db.Find(&proverTasks).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ProverTask.GetAssignedProverTasks error:%w", err)
|
||||
}
|
||||
return proverTasks, nil
|
||||
}
|
||||
|
||||
// SetProverTask updates or inserts a ProverTask record.
|
||||
func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask, dbTX ...*gorm.DB) error {
|
||||
db := o.db.WithContext(ctx)
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
|
||||
db = db.Model(&ProverTask{})
|
||||
db = db.Clauses(clause.OnConflict{
|
||||
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
|
||||
@@ -84,13 +140,30 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask)
|
||||
}
|
||||
|
||||
// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
|
||||
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus) error {
|
||||
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.RollerProveStatus, dbTX ...*gorm.DB) error {
|
||||
db := o.db.WithContext(ctx)
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.Model(&ProverTask{})
|
||||
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", proofType, taskID, pk)
|
||||
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", int(proofType), taskID, pk)
|
||||
|
||||
if err := db.Update("proving_status", status).Error; err != nil {
|
||||
return fmt.Errorf("ProverTask.UpdateProverTaskProvingStatus error: %w, proof type: %v, taskID: %v, prover public key: %v, status: %v", err, proofType.String(), taskID, pk, status.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateProverTaskFailureType update the prover task failure type
|
||||
func (o *ProverTask) UpdateProverTaskFailureType(ctx context.Context, proofType message.ProofType, hash string, pk string, failureType coordinatorType.ProverTaskFailureType, dbTX ...*gorm.DB) error {
|
||||
db := o.db.WithContext(ctx)
|
||||
if len(dbTX) > 0 && dbTX[0] != nil {
|
||||
db = dbTX[0]
|
||||
}
|
||||
db = db.Model(&ProverTask{})
|
||||
db = db.Where("hash", hash).Where("roller_public_key", pk).Where("task_type", int(proofType))
|
||||
if err := db.Update("failure_type", int(failureType)).Error; err != nil {
|
||||
return fmt.Errorf("ProverTask.UpdateProverTaskFailureType error: %w, proof type: %v, taskID: %v, prover public key: %v, failure type: %v", err, proofType.String(), hash, pk, failureType.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
21
coordinator/internal/types/block.go
Normal file
21
coordinator/internal/types/block.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
|
||||
type WrappedBlock struct {
|
||||
Header *types.Header `json:"header"`
|
||||
// Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
|
||||
Transactions []*types.TransactionData `json:"transactions"`
|
||||
WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
|
||||
}
|
||||
|
||||
// BatchInfo contains the BlockBatch's main info
|
||||
type BatchInfo struct {
|
||||
Index uint64 `json:"index"`
|
||||
Hash string `json:"hash"`
|
||||
StateRoot string `json:"state_root"`
|
||||
}
|
||||
22
coordinator/internal/types/prover_task.go
Normal file
22
coordinator/internal/types/prover_task.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package types
|
||||
|
||||
// ProverTaskFailureType the type of prover task failure
|
||||
type ProverTaskFailureType int
|
||||
|
||||
const (
|
||||
// ProverTaskFailureTypeUnknown prover task unknown error
|
||||
ProverTaskFailureTypeUnknown ProverTaskFailureType = iota
|
||||
// ProverTaskFailureTypeTimeout prover task failure of timeout
|
||||
ProverTaskFailureTypeTimeout
|
||||
)
|
||||
|
||||
func (r ProverTaskFailureType) String() string {
|
||||
switch r {
|
||||
case ProverTaskFailureTypeUnknown:
|
||||
return "prover task failure unknown"
|
||||
case ProverTaskFailureTypeTimeout:
|
||||
return "prover task failure timeout"
|
||||
default:
|
||||
return "illegal failure type"
|
||||
}
|
||||
}
|
||||
21
coordinator/internal/types/rollers_info.go
Normal file
21
coordinator/internal/types/rollers_info.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
)
|
||||
|
||||
// RollersInfo is assigned rollers info of a block batch (session)
|
||||
type RollersInfo struct {
|
||||
ID string `json:"id"`
|
||||
RollerStatusList []*RollerStatus `json:"rollers"`
|
||||
StartTimestamp int64 `json:"start_timestamp"`
|
||||
ProveType message.ProofType `json:"prove_type,omitempty"`
|
||||
}
|
||||
|
||||
// RollerStatus is the roller name and roller prove status
|
||||
type RollerStatus struct {
|
||||
PublicKey string `json:"public_key"`
|
||||
Name string `json:"name"`
|
||||
Status types.RollerProveStatus `json:"status"`
|
||||
}
|
||||
@@ -1,853 +0,0 @@
|
||||
package coordinator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
cmap "github.com/orcaman/concurrent-map"
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"golang.org/x/exp/rand"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils/workerpool"
|
||||
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
"scroll-tech/coordinator/verifier"
|
||||
)
|
||||
|
||||
var (
|
||||
// proofs
|
||||
coordinatorProofsReceivedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/proofs/received/total", metrics.ScrollRegistry)
|
||||
|
||||
coordinatorProofsVerifiedSuccessTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/verified/success/time", metrics.ScrollRegistry)
|
||||
coordinatorProofsVerifiedFailedTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/verified/failed/time", metrics.ScrollRegistry)
|
||||
coordinatorProofsGeneratedFailedTimeTimer = geth_metrics.NewRegisteredTimer("coordinator/proofs/generated/failed/time", metrics.ScrollRegistry)
|
||||
|
||||
// sessions
|
||||
coordinatorSessionsSuccessTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/success/total", metrics.ScrollRegistry)
|
||||
coordinatorSessionsTimeoutTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)
|
||||
coordinatorSessionsFailedTotalCounter = geth_metrics.NewRegisteredCounter("coordinator/sessions/failed/total", metrics.ScrollRegistry)
|
||||
|
||||
coordinatorSessionsActiveNumberGauge = geth_metrics.NewRegisteredCounter("coordinator/sessions/active/number", metrics.ScrollRegistry)
|
||||
)
|
||||
|
||||
const (
|
||||
proofAndPkBufferSize = 10
|
||||
)
|
||||
|
||||
type rollerProofStatus struct {
|
||||
id string
|
||||
typ message.ProofType
|
||||
pk string
|
||||
status types.RollerProveStatus
|
||||
}
|
||||
|
||||
// Contains all the information on an ongoing proof generation session.
|
||||
type session struct {
|
||||
taskID string
|
||||
proverTasks []*orm.ProverTask
|
||||
// finish channel is used to pass the public key of the rollers who finished proving process.
|
||||
finishChan chan rollerProofStatus
|
||||
}
|
||||
|
||||
// Manager is responsible for maintaining connections with active rollers,
|
||||
// sending the challenges, and receiving proofs. It also regulates the reward
|
||||
// distribution. All read and write logic and connection handling happens through
|
||||
// a modular websocket server, contained within the Manager. Incoming messages are
|
||||
// then passed to the Manager where the actual handling logic resides.
|
||||
type Manager struct {
|
||||
// The manager context.
|
||||
ctx context.Context
|
||||
|
||||
// The roller manager configuration.
|
||||
cfg *config.RollerManagerConfig
|
||||
|
||||
// The indicator whether the backend is running or not.
|
||||
running int32
|
||||
|
||||
// A mutex guarding the boolean below.
|
||||
mu sync.RWMutex
|
||||
// A map containing all active proof generation sessions.
|
||||
sessions map[string]*session
|
||||
// A map containing proof failed or verify failed proof.
|
||||
rollerPool cmap.ConcurrentMap
|
||||
|
||||
failedSessionInfos map[string]*SessionInfo
|
||||
|
||||
// A direct connection to the Halo2 verifier, used to verify
|
||||
// incoming proofs.
|
||||
verifier *verifier.Verifier
|
||||
|
||||
// orm interface
|
||||
l2BlockOrm *orm.L2Block
|
||||
chunkOrm *orm.Chunk
|
||||
batchOrm *orm.Batch
|
||||
proverTaskOrm *orm.ProverTask
|
||||
|
||||
// Token cache
|
||||
tokenCache *cache.Cache
|
||||
// A mutex guarding registration
|
||||
registerMu sync.RWMutex
|
||||
|
||||
// Verifier worker pool
|
||||
verifierWorkerPool *workerpool.WorkerPool
|
||||
}
|
||||
|
||||
// New returns a new instance of Manager. The instance will be not fully prepared,
|
||||
// and still needs to be finalized and ran by calling `manager.Start`.
|
||||
func New(ctx context.Context, cfg *config.RollerManagerConfig, db *gorm.DB) (*Manager, error) {
|
||||
v, err := verifier.NewVerifier(cfg.Verifier)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Info("Start coordinator successfully.")
|
||||
return &Manager{
|
||||
ctx: ctx,
|
||||
cfg: cfg,
|
||||
rollerPool: cmap.New(),
|
||||
sessions: make(map[string]*session),
|
||||
failedSessionInfos: make(map[string]*SessionInfo),
|
||||
verifier: v,
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
batchOrm: orm.NewBatch(db),
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
|
||||
verifierWorkerPool: workerpool.NewWorkerPool(cfg.MaxVerifierWorkers),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start the Manager module.
|
||||
func (m *Manager) Start() error {
|
||||
if m.isRunning() {
|
||||
return nil
|
||||
}
|
||||
|
||||
m.verifierWorkerPool.Run()
|
||||
m.restorePrevSessions()
|
||||
|
||||
atomic.StoreInt32(&m.running, 1)
|
||||
|
||||
go m.Loop()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop the Manager module, for a graceful shutdown.
|
||||
func (m *Manager) Stop() {
|
||||
if !m.isRunning() {
|
||||
return
|
||||
}
|
||||
m.verifierWorkerPool.Stop()
|
||||
|
||||
atomic.StoreInt32(&m.running, 0)
|
||||
}
|
||||
|
||||
// isRunning returns an indicator whether manager is running or not.
|
||||
func (m *Manager) isRunning() bool {
|
||||
return atomic.LoadInt32(&m.running) == 1
|
||||
}
|
||||
|
||||
// Loop keeps the manager running.
|
||||
func (m *Manager) Loop() {
|
||||
var (
|
||||
tick = time.NewTicker(time.Second * 2)
|
||||
chunkTasks []*orm.Chunk
|
||||
batchTasks []*orm.Batch
|
||||
)
|
||||
defer tick.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-tick.C:
|
||||
// load and send batch tasks
|
||||
if len(batchTasks) == 0 {
|
||||
var err error
|
||||
batchTasks, err = m.batchOrm.GetUnassignedBatches(m.ctx, m.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
if err != nil {
|
||||
log.Error("failed to get unassigned batch proving tasks", "error", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Select batch type roller and send message
|
||||
for len(batchTasks) > 0 && m.StartBatchProofGenerationSession(batchTasks[0], nil) {
|
||||
batchTasks = batchTasks[1:]
|
||||
}
|
||||
|
||||
// load and send chunk tasks
|
||||
if len(chunkTasks) == 0 {
|
||||
// TODO: add cache
|
||||
var err error
|
||||
chunkTasks, err = m.chunkOrm.GetUnassignedChunks(m.ctx, m.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
if err != nil {
|
||||
log.Error("failed to get unassigned chunk proving tasks", "error", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Select chunk type roller and send message
|
||||
for len(chunkTasks) > 0 && m.StartChunkProofGenerationSession(chunkTasks[0], nil) {
|
||||
chunkTasks = chunkTasks[1:]
|
||||
}
|
||||
case <-m.ctx.Done():
|
||||
if m.ctx.Err() != nil {
|
||||
log.Error(
|
||||
"manager context canceled with error",
|
||||
"error", m.ctx.Err(),
|
||||
)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) restorePrevSessions() {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
var hashes []string
|
||||
// load assigned batch tasks from db
|
||||
batchTasks, err := m.batchOrm.GetAssignedBatches(m.ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to load assigned batch tasks from db", "error", err)
|
||||
return
|
||||
}
|
||||
for _, batchTask := range batchTasks {
|
||||
hashes = append(hashes, batchTask.Hash)
|
||||
}
|
||||
// load assigned chunk tasks from db
|
||||
chunkTasks, err := m.chunkOrm.GetAssignedChunks(m.ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get assigned batch batchHashes from db", "error", err)
|
||||
return
|
||||
}
|
||||
for _, chunkTask := range chunkTasks {
|
||||
hashes = append(hashes, chunkTask.Hash)
|
||||
}
|
||||
prevSessions, err := m.proverTaskOrm.GetProverTasksByHashes(m.ctx, hashes)
|
||||
if err != nil {
|
||||
log.Error("failed to recover roller session info from db", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
proverTasksMaps := make(map[string][]*orm.ProverTask)
|
||||
for _, v := range prevSessions {
|
||||
log.Info("restore roller info for session", "session start time", v.CreatedAt, "session id", v.TaskID, "roller name",
|
||||
v.ProverName, "proof type", v.TaskType, "public key", v.ProverPublicKey, "proof status", v.ProvingStatus)
|
||||
proverTasksMaps[v.TaskID] = append(proverTasksMaps[v.TaskID], v)
|
||||
}
|
||||
|
||||
for taskID, proverTasks := range proverTasksMaps {
|
||||
sess := &session{
|
||||
taskID: taskID,
|
||||
proverTasks: proverTasks,
|
||||
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
|
||||
}
|
||||
m.sessions[taskID] = sess
|
||||
go m.CollectProofs(sess)
|
||||
}
|
||||
}
|
||||
|
||||
// HandleZkProof handle a ZkProof submitted from a roller.
|
||||
// For now only proving/verifying error will lead to setting status as skipped.
|
||||
// db/unmarshal errors will not because they are errors on the business logic side.
|
||||
func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
|
||||
var dbErr error
|
||||
var success bool
|
||||
|
||||
// Assess if the proof generation session for the given ID is still active.
|
||||
// We hold the read lock until the end of the function so that there is no
|
||||
// potential race for channel deletion.
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
sess, ok := m.sessions[msg.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf("proof generation session for id %v does not existID", msg.ID)
|
||||
}
|
||||
|
||||
var proverTask *orm.ProverTask
|
||||
for _, si := range sess.proverTasks {
|
||||
// get the send session info of this proof msg
|
||||
if si.TaskID == msg.ID && si.ProverPublicKey == pk {
|
||||
proverTask = si
|
||||
}
|
||||
}
|
||||
|
||||
if proverTask == nil {
|
||||
return fmt.Errorf("proof generation session for id %v pk:%s does not existID", msg.ID, pk)
|
||||
}
|
||||
|
||||
proofTime := time.Since(proverTask.CreatedAt)
|
||||
proofTimeSec := uint64(proofTime.Seconds())
|
||||
|
||||
// Ensure this roller is eligible to participate in the prover task.
|
||||
if types.RollerProveStatus(proverTask.ProvingStatus) == types.RollerProofValid {
|
||||
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
|
||||
// TODO: Defend invalid proof resubmissions by one of the following two methods:
|
||||
// (i) slash the roller for each submission of invalid proof
|
||||
// (ii) set the maximum failure retry times
|
||||
log.Warn(
|
||||
"roller has already submitted valid proof in proof session",
|
||||
"roller name", proverTask.ProverName,
|
||||
"roller pk", proverTask.ProverPublicKey,
|
||||
"proof type", proverTask.TaskType,
|
||||
"proof id", msg.ID,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Info("handling zk proof", "proof id", msg.ID, "roller name", proverTask.ProverName, "roller pk",
|
||||
proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof time", proofTimeSec)
|
||||
|
||||
defer func() {
|
||||
// TODO: maybe we should use db tx for the whole process?
|
||||
// Roll back current proof's status.
|
||||
if dbErr != nil {
|
||||
if msg.Type == message.ProofTypeChunk {
|
||||
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskUnassigned); err != nil {
|
||||
log.Error("fail to reset chunk task status as Unassigned", "msg.ID", msg.ID)
|
||||
}
|
||||
}
|
||||
if msg.Type == message.ProofTypeBatch {
|
||||
if err := m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskUnassigned); err != nil {
|
||||
log.Error("fail to reset batch task status as Unassigned", "msg.ID", msg.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
// set proof status
|
||||
status := types.RollerProofInvalid
|
||||
if success && dbErr == nil {
|
||||
status = types.RollerProofValid
|
||||
}
|
||||
// notify the session that the roller finishes the proving process
|
||||
sess.finishChan <- rollerProofStatus{msg.ID, msg.Type, pk, status}
|
||||
}()
|
||||
|
||||
if msg.Status != message.StatusOk {
|
||||
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsGeneratedFailedTimeTimer(proverTask.ProverPublicKey, proofTime)
|
||||
log.Info(
|
||||
"proof generated by roller failed",
|
||||
"proof id", msg.ID,
|
||||
"roller name", proverTask.ProverName,
|
||||
"roller pk", proverTask.ProverPublicKey,
|
||||
"proof type", msg.Type,
|
||||
"proof time", proofTimeSec,
|
||||
"error", msg.Error,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// store proof content
|
||||
if msg.Type == message.ProofTypeChunk {
|
||||
if dbErr = m.chunkOrm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
|
||||
log.Error("failed to store chunk proof into db", "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
if dbErr = m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskProved); dbErr != nil {
|
||||
log.Error("failed to update chunk task status as proved", "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
}
|
||||
if msg.Type == message.ProofTypeBatch {
|
||||
if dbErr = m.batchOrm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
|
||||
log.Error("failed to store batch proof into db", "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
if dbErr = m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskProved); dbErr != nil {
|
||||
log.Error("failed to update batch task status as proved", "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
}
|
||||
|
||||
coordinatorProofsReceivedTotalCounter.Inc(1)
|
||||
|
||||
var verifyErr error
|
||||
// TODO: wrap both chunk verifier and batch verifier
|
||||
success, verifyErr = m.verifyProof(msg.Proof)
|
||||
if verifyErr != nil {
|
||||
// TODO: this is only a temp workaround for testnet, we should return err in real cases
|
||||
success = false
|
||||
log.Error("Failed to verify zk proof", "proof id", msg.ID, "roller name", proverTask.ProverName,
|
||||
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
// TODO: Roller needs to be slashed if proof is invalid.
|
||||
}
|
||||
|
||||
if success {
|
||||
if msg.Type == message.ProofTypeChunk {
|
||||
if dbErr = m.chunkOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskVerified); dbErr != nil {
|
||||
log.Error(
|
||||
"failed to update chunk proving_status",
|
||||
"msg.ID", msg.ID,
|
||||
"status", types.ProvingTaskVerified,
|
||||
"error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
if err := m.checkAreAllChunkProofsReady(msg.ID); err != nil {
|
||||
log.Error("failed to check are all chunk proofs ready", "error", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
if msg.Type == message.ProofTypeBatch {
|
||||
if dbErr = m.batchOrm.UpdateProvingStatus(m.ctx, msg.ID, types.ProvingTaskVerified); dbErr != nil {
|
||||
log.Error(
|
||||
"failed to update batch proving_status",
|
||||
"msg.ID", msg.ID,
|
||||
"status", types.ProvingTaskVerified,
|
||||
"error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
}
|
||||
|
||||
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsVerifiedSuccessTimeTimer(proverTask.ProverPublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator success", "proof id", msg.ID, "roller name", proverTask.ProverName,
|
||||
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec)
|
||||
} else {
|
||||
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
|
||||
m.updateMetricRollerProofsVerifiedFailedTimeTimer(proverTask.ProverPublicKey, proofTime)
|
||||
log.Info("proof verified by coordinator failed", "proof id", msg.ID, "roller name", proverTask.ProverName,
|
||||
"roller pk", proverTask.ProverPublicKey, "proof type", msg.Type, "proof time", proofTimeSec, "error", verifyErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Manager) checkAreAllChunkProofsReady(chunkHash string) error {
|
||||
batchHash, err := m.chunkOrm.GetChunkBatchHash(m.ctx, chunkHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allReady, err := m.chunkOrm.CheckIfBatchChunkProofsAreReady(m.ctx, batchHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if allReady {
|
||||
err := m.chunkOrm.UpdateChunkProofsStatusByBatchHash(m.ctx, batchHash, types.ChunkProofsStatusReady)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkAttempts use the count of prover task info to check the attempts
|
||||
func (m *Manager) checkAttemptsExceeded(hash string) bool {
|
||||
proverTasks, err := m.proverTaskOrm.GetProverTasksByHashes(context.Background(), []string{hash})
|
||||
if err != nil {
|
||||
log.Error("get session info error", "hash id", hash, "error", err)
|
||||
return true
|
||||
}
|
||||
|
||||
if len(proverTasks) >= int(m.cfg.SessionAttempts) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CollectProofs collects proofs corresponding to a proof generation session.
|
||||
func (m *Manager) CollectProofs(sess *session) {
|
||||
coordinatorSessionsActiveNumberGauge.Inc(1)
|
||||
defer coordinatorSessionsActiveNumberGauge.Dec(1)
|
||||
|
||||
for {
|
||||
select {
|
||||
//Execute after timeout, set in config.json. Consider all rollers failed.
|
||||
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
|
||||
if !m.checkAttemptsExceeded(sess.taskID) {
|
||||
var success bool
|
||||
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeBatch {
|
||||
success = m.StartBatchProofGenerationSession(nil, sess)
|
||||
} else if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeChunk {
|
||||
success = m.StartChunkProofGenerationSession(nil, sess)
|
||||
}
|
||||
if success {
|
||||
m.mu.Lock()
|
||||
for _, v := range sess.proverTasks {
|
||||
m.freeTaskIDForRoller(v.ProverPublicKey, v.TaskID)
|
||||
}
|
||||
m.mu.Unlock()
|
||||
log.Info("Retrying session", "session id:", sess.taskID)
|
||||
return
|
||||
}
|
||||
}
|
||||
// record failed session.
|
||||
errMsg := "proof generation session ended without receiving any valid proofs"
|
||||
m.addFailedSession(sess, errMsg)
|
||||
log.Warn(errMsg, "session id", sess.taskID)
|
||||
// Set status as skipped.
|
||||
// Note that this is only a workaround for testnet here.
|
||||
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
|
||||
// so as to re-distribute the task in the future
|
||||
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeChunk {
|
||||
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, sess.taskID, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("fail to reset chunk task_status as Unassigned", "task id", sess.taskID, "err", err)
|
||||
}
|
||||
}
|
||||
if message.ProofType(sess.proverTasks[0].TaskType) == message.ProofTypeBatch {
|
||||
if err := m.batchOrm.UpdateProvingStatus(m.ctx, sess.taskID, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("fail to reset batch task_status as Unassigned", "task id", sess.taskID, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
for _, v := range sess.proverTasks {
|
||||
m.freeTaskIDForRoller(v.ProverPublicKey, v.TaskID)
|
||||
}
|
||||
delete(m.sessions, sess.taskID)
|
||||
m.mu.Unlock()
|
||||
coordinatorSessionsTimeoutTotalCounter.Inc(1)
|
||||
return
|
||||
|
||||
//Execute after one of the roller finishes sending proof, return early if all rollers had sent results.
|
||||
case ret := <-sess.finishChan:
|
||||
m.mu.Lock()
|
||||
for idx := range sess.proverTasks {
|
||||
if sess.proverTasks[idx].ProverPublicKey == ret.pk {
|
||||
sess.proverTasks[idx].ProvingStatus = int16(ret.status)
|
||||
}
|
||||
}
|
||||
|
||||
if sess.isSessionFailed() {
|
||||
if ret.typ == message.ProofTypeChunk {
|
||||
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, ret.id, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("failed to update chunk proving_status as failed", "msg.ID", ret.id, "error", err)
|
||||
}
|
||||
}
|
||||
if ret.typ == message.ProofTypeBatch {
|
||||
if err := m.batchOrm.UpdateProvingStatus(m.ctx, ret.id, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("failed to update batch proving_status as failed", "msg.ID", ret.id, "error", err)
|
||||
}
|
||||
}
|
||||
coordinatorSessionsFailedTotalCounter.Inc(1)
|
||||
}
|
||||
|
||||
if err := m.proverTaskOrm.UpdateProverTaskProvingStatus(m.ctx, ret.typ, ret.id, ret.pk, ret.status); err != nil {
|
||||
log.Error("failed to update session info proving status",
|
||||
"proof type", ret.typ, "task id", ret.id, "pk", ret.pk, "status", ret.status, "error", err)
|
||||
}
|
||||
|
||||
//Check if all rollers have finished their tasks, and rollers with valid results are indexed by public key.
|
||||
finished, validRollers := sess.isRollersFinished()
|
||||
|
||||
//When all rollers have finished submitting their tasks, select a winner within rollers with valid proof, and return, terminate the for loop.
|
||||
if finished && len(validRollers) > 0 {
|
||||
//Select a random index for this slice.
|
||||
randIndex := rand.Int63n(int64(len(validRollers)))
|
||||
_ = validRollers[randIndex]
|
||||
// TODO: reward winner
|
||||
for _, proverTask := range sess.proverTasks {
|
||||
m.freeTaskIDForRoller(proverTask.ProverPublicKey, proverTask.TaskID)
|
||||
delete(m.sessions, proverTask.TaskID)
|
||||
}
|
||||
m.mu.Unlock()
|
||||
|
||||
coordinatorSessionsSuccessTotalCounter.Inc(1)
|
||||
return
|
||||
}
|
||||
m.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// isRollersFinished checks if all rollers have finished submitting proofs, check their validity, and record rollers who produce valid proof.
|
||||
// When rollersLeft reaches 0, it means all rollers have finished their tasks.
|
||||
// validRollers also records the public keys of rollers who have finished their tasks correctly as index.
|
||||
func (s *session) isRollersFinished() (bool, []string) {
|
||||
var validRollers []string
|
||||
for _, sessionInfo := range s.proverTasks {
|
||||
if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofValid {
|
||||
validRollers = append(validRollers, sessionInfo.ProverPublicKey)
|
||||
continue
|
||||
}
|
||||
|
||||
if types.RollerProveStatus(sessionInfo.ProvingStatus) == types.RollerProofInvalid {
|
||||
continue
|
||||
}
|
||||
|
||||
// Some rollers are still proving.
|
||||
return false, nil
|
||||
}
|
||||
return true, validRollers
|
||||
}
|
||||
|
||||
func (s *session) isSessionFailed() bool {
|
||||
for _, sessionInfo := range s.proverTasks {
|
||||
if types.RollerProveStatus(sessionInfo.ProvingStatus) != types.RollerProofInvalid {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// APIs collect API services.
|
||||
func (m *Manager) APIs() []rpc.API {
|
||||
return []rpc.API{
|
||||
{
|
||||
Namespace: "roller",
|
||||
Service: RollerAPI(m),
|
||||
Public: true,
|
||||
},
|
||||
{
|
||||
Namespace: "debug",
|
||||
Public: true,
|
||||
Service: RollerDebugAPI(m),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// StartChunkProofGenerationSession starts a chunk proof generation session
|
||||
func (m *Manager) StartChunkProofGenerationSession(task *orm.Chunk, prevSession *session) (success bool) {
|
||||
var taskID string
|
||||
if task != nil {
|
||||
taskID = task.Hash
|
||||
} else {
|
||||
taskID = prevSession.taskID
|
||||
}
|
||||
if m.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
|
||||
log.Warn("no idle chunk roller when starting proof generation session", "id", taskID)
|
||||
return false
|
||||
}
|
||||
|
||||
log.Info("start chunk proof generation session", "id", taskID)
|
||||
|
||||
defer func() {
|
||||
if !success {
|
||||
if task != nil {
|
||||
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskUnassigned); err != nil {
|
||||
log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
|
||||
}
|
||||
} else {
|
||||
if err := m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Get block hashes.
|
||||
wrappedBlocks, err := m.l2BlockOrm.GetL2BlocksByChunkHash(m.ctx, taskID)
|
||||
if err != nil {
|
||||
log.Error(
|
||||
"Failed to fetch wrapped blocks",
|
||||
"batch hash", taskID,
|
||||
"error", err,
|
||||
)
|
||||
return false
|
||||
}
|
||||
blockHashes := make([]common.Hash, len(wrappedBlocks))
|
||||
for i, wrappedBlock := range wrappedBlocks {
|
||||
blockHashes[i] = wrappedBlock.Header.Hash()
|
||||
}
|
||||
|
||||
// Dispatch task to chunk rollers.
|
||||
var proverTasks []*orm.ProverTask
|
||||
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
|
||||
roller := m.selectRoller(message.ProofTypeChunk)
|
||||
if roller == nil {
|
||||
log.Info("selectRoller returns nil")
|
||||
break
|
||||
}
|
||||
log.Info("roller is picked", "session id", taskID, "name", roller.Name, "public key", roller.PublicKey)
|
||||
// send trace to roller
|
||||
if !roller.sendTask(&message.TaskMsg{ID: taskID, Type: message.ProofTypeChunk, BlockHashes: blockHashes}) {
|
||||
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
|
||||
continue
|
||||
}
|
||||
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
|
||||
proverTask := orm.ProverTask{
|
||||
TaskID: taskID,
|
||||
ProverPublicKey: roller.PublicKey,
|
||||
TaskType: int16(message.ProofTypeChunk),
|
||||
ProverName: roller.Name,
|
||||
ProvingStatus: int16(types.RollerAssigned),
|
||||
FailureType: int16(types.RollerFailureTypeUndefined),
|
||||
CreatedAt: time.Now(), // Used in proverTasks, should be explicitly assigned here.
|
||||
}
|
||||
// Store prover task info.
|
||||
if err = m.proverTaskOrm.SetProverTask(m.ctx, &proverTask); err != nil {
|
||||
log.Error("db set session info fail", "session id", taskID, "error", err)
|
||||
return false
|
||||
}
|
||||
proverTasks = append(proverTasks, &proverTask)
|
||||
log.Info("assigned proof to roller", "session id", taskID, "session type", message.ProofTypeChunk, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "proof status", proverTask.ProvingStatus)
|
||||
|
||||
}
|
||||
// No roller assigned.
|
||||
if len(proverTasks) == 0 {
|
||||
log.Error("no roller assigned", "id", taskID, "number of idle chunk rollers", m.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
return false
|
||||
}
|
||||
|
||||
// Update session proving status as assigned.
|
||||
if err = m.chunkOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskAssigned); err != nil {
|
||||
log.Error("failed to update task status", "id", taskID, "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Create a proof generation session.
|
||||
sess := &session{
|
||||
taskID: taskID,
|
||||
proverTasks: proverTasks,
|
||||
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
m.sessions[taskID] = sess
|
||||
m.mu.Unlock()
|
||||
go m.CollectProofs(sess)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// StartBatchProofGenerationSession starts an batch proof generation.
|
||||
func (m *Manager) StartBatchProofGenerationSession(task *orm.Batch, prevSession *session) (success bool) {
|
||||
var taskID string
|
||||
if task != nil {
|
||||
taskID = task.Hash
|
||||
} else {
|
||||
taskID = prevSession.taskID
|
||||
}
|
||||
if m.GetNumberOfIdleRollers(message.ProofTypeBatch) == 0 {
|
||||
log.Warn("no idle common roller when starting proof generation session", "id", taskID)
|
||||
return false
|
||||
}
|
||||
|
||||
log.Info("start batch proof generation session", "id", taskID)
|
||||
|
||||
defer func() {
|
||||
if !success {
|
||||
if task != nil {
|
||||
if err := m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskUnassigned); err != nil {
|
||||
log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
|
||||
} else if err := m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskFailed); err != nil {
|
||||
log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
// get chunk proofs from db
|
||||
chunkProofs, err := m.chunkOrm.GetProofsByBatchHash(m.ctx, taskID)
|
||||
if err != nil {
|
||||
log.Error("failed to get chunk proofs for batch task", "session id", taskID, "error", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Dispatch task to chunk rollers.
|
||||
var proverTasks []*orm.ProverTask
|
||||
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
|
||||
roller := m.selectRoller(message.ProofTypeBatch)
|
||||
if roller == nil {
|
||||
log.Info("selectRoller returns nil")
|
||||
break
|
||||
}
|
||||
log.Info("roller is picked", "session id", taskID, "name", roller.Name, "type", roller.Type, "public key", roller.PublicKey)
|
||||
// send trace to roller
|
||||
if !roller.sendTask(&message.TaskMsg{
|
||||
ID: taskID,
|
||||
Type: message.ProofTypeBatch,
|
||||
SubProofs: chunkProofs,
|
||||
}) {
|
||||
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
|
||||
continue
|
||||
}
|
||||
|
||||
proverTask := orm.ProverTask{
|
||||
TaskID: taskID,
|
||||
ProverPublicKey: roller.PublicKey,
|
||||
TaskType: int16(message.ProofTypeBatch),
|
||||
ProverName: roller.Name,
|
||||
ProvingStatus: int16(types.RollerAssigned),
|
||||
FailureType: int16(types.RollerFailureTypeUndefined),
|
||||
CreatedAt: time.Now(), // Used in proverTasks, should be explicitly assigned here.
|
||||
}
|
||||
// Store session info.
|
||||
if err = m.proverTaskOrm.SetProverTask(context.Background(), &proverTask); err != nil {
|
||||
log.Error("db set session info fail", "session id", taskID, "error", err)
|
||||
return false
|
||||
}
|
||||
|
||||
m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
|
||||
proverTasks = append(proverTasks, &proverTask)
|
||||
log.Info("assigned proof to roller", "session id", taskID, "session type", message.ProofTypeBatch, "roller name", roller.Name,
|
||||
"roller pk", roller.PublicKey, "proof status", proverTask.ProvingStatus)
|
||||
}
|
||||
// No roller assigned.
|
||||
if len(proverTasks) == 0 {
|
||||
log.Error("no roller assigned", "id", taskID, "number of idle batch rollers", m.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
return false
|
||||
}
|
||||
|
||||
// Update session proving status as assigned.
|
||||
if err = m.batchOrm.UpdateProvingStatus(m.ctx, taskID, types.ProvingTaskAssigned); err != nil {
|
||||
log.Error("failed to update task status", "id", taskID, "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Create a proof generation session.
|
||||
sess := &session{
|
||||
taskID: taskID,
|
||||
proverTasks: proverTasks,
|
||||
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
m.sessions[taskID] = sess
|
||||
m.mu.Unlock()
|
||||
go m.CollectProofs(sess)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Manager) addFailedSession(sess *session, errMsg string) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.failedSessionInfos[sess.taskID] = newSessionInfo(sess, types.ProvingTaskFailed, errMsg, true)
|
||||
}
|
||||
|
||||
// VerifyToken verifies pukey for token and expiration time
|
||||
func (m *Manager) VerifyToken(authMsg *message.AuthMsg) (bool, error) {
|
||||
pubkey, _ := authMsg.PublicKey()
|
||||
// GetValue returns nil if value is expired
|
||||
if token, ok := m.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
|
||||
return false, fmt.Errorf("failed to find corresponding token. roller name: %s. roller pk: %s", authMsg.Identity.Name, pubkey)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (m *Manager) addVerifyTask(proof *message.AggProof) chan verifyResult {
|
||||
c := make(chan verifyResult, 1)
|
||||
m.verifierWorkerPool.AddTask(func() {
|
||||
result, err := m.verifier.VerifyProof(proof)
|
||||
c <- verifyResult{result, err}
|
||||
})
|
||||
return c
|
||||
}
|
||||
|
||||
func (m *Manager) verifyProof(proof *message.AggProof) (bool, error) {
|
||||
if !m.isRunning() {
|
||||
return false, errors.New("coordinator has stopped before verification")
|
||||
}
|
||||
verifyResultChan := m.addVerifyTask(proof)
|
||||
result := <-verifyResultChan
|
||||
return result.result, result.err
|
||||
}
|
||||
|
||||
type verifyResult struct {
|
||||
result bool
|
||||
err error
|
||||
}
|
||||
@@ -1,60 +0,0 @@
|
||||
package coordinator
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
)
|
||||
|
||||
type rollerMetrics struct {
|
||||
rollerProofsVerifiedSuccessTimeTimer geth_metrics.Timer
|
||||
rollerProofsVerifiedFailedTimeTimer geth_metrics.Timer
|
||||
rollerProofsGeneratedFailedTimeTimer geth_metrics.Timer
|
||||
rollerProofsLastAssignedTimestampGauge geth_metrics.Gauge
|
||||
rollerProofsLastFinishedTimestampGauge geth_metrics.Gauge
|
||||
}
|
||||
|
||||
func (m *Manager) updateMetricRollerProofsLastFinishedTimestampGauge(pk string) {
|
||||
if node, ok := m.rollerPool.Get(pk); ok {
|
||||
rMs := node.(*rollerNode).metrics
|
||||
if rMs != nil {
|
||||
rMs.rollerProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) updateMetricRollerProofsLastAssignedTimestampGauge(pk string) {
|
||||
if node, ok := m.rollerPool.Get(pk); ok {
|
||||
rMs := node.(*rollerNode).metrics
|
||||
if rMs != nil {
|
||||
rMs.rollerProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) updateMetricRollerProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
|
||||
if node, ok := m.rollerPool.Get(pk); ok {
|
||||
rMs := node.(*rollerNode).metrics
|
||||
if rMs != nil {
|
||||
rMs.rollerProofsVerifiedSuccessTimeTimer.Update(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) updateMetricRollerProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
|
||||
if node, ok := m.rollerPool.Get(pk); ok {
|
||||
rMs := node.(*rollerNode).metrics
|
||||
if rMs != nil {
|
||||
rMs.rollerProofsVerifiedFailedTimeTimer.Update(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) updateMetricRollerProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
|
||||
if node, ok := m.rollerPool.Get(pk); ok {
|
||||
rMs := node.(*rollerNode).metrics
|
||||
if rMs != nil {
|
||||
rMs.rollerProofsGeneratedFailedTimeTimer.Update(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,144 +0,0 @@
|
||||
package coordinator
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
cmap "github.com/orcaman/concurrent-map"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
)
|
||||
|
||||
// rollerNode records roller status and send task to connected roller.
|
||||
type rollerNode struct {
|
||||
// Roller name
|
||||
Name string
|
||||
// Roller type
|
||||
Type message.ProofType
|
||||
// Roller public key
|
||||
PublicKey string
|
||||
// Roller version
|
||||
Version string
|
||||
|
||||
// task channel
|
||||
taskChan chan *message.TaskMsg
|
||||
// session id list which delivered to roller.
|
||||
TaskIDs cmap.ConcurrentMap
|
||||
|
||||
// Time of message creation
|
||||
registerTime time.Time
|
||||
|
||||
metrics *rollerMetrics
|
||||
}
|
||||
|
||||
func (r *rollerNode) sendTask(msg *message.TaskMsg) bool {
|
||||
select {
|
||||
case r.taskChan <- msg:
|
||||
r.TaskIDs.Set(msg.ID, struct{}{})
|
||||
default:
|
||||
log.Warn("roller channel is full", "roller name", r.Name, "public key", r.PublicKey)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Manager) reloadRollerAssignedTasks(pubkey string) *cmap.ConcurrentMap {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
taskIDs := cmap.New()
|
||||
for id, sess := range m.sessions {
|
||||
for _, proverTask := range sess.proverTasks {
|
||||
if proverTask.ProverPublicKey == pubkey && proverTask.ProvingStatus == int16(types.RollerAssigned) {
|
||||
taskIDs.Set(id, struct{}{})
|
||||
}
|
||||
}
|
||||
}
|
||||
return &taskIDs
|
||||
}
|
||||
|
||||
func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *message.TaskMsg, error) {
|
||||
node, ok := m.rollerPool.Get(pubkey)
|
||||
if !ok {
|
||||
taskIDs := m.reloadRollerAssignedTasks(pubkey)
|
||||
rMs := &rollerMetrics{
|
||||
rollerProofsVerifiedSuccessTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/success/time/%s", pubkey), metrics.ScrollRegistry),
|
||||
rollerProofsVerifiedFailedTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/verified/failed/time/%s", pubkey), metrics.ScrollRegistry),
|
||||
rollerProofsGeneratedFailedTimeTimer: geth_metrics.GetOrRegisterTimer(fmt.Sprintf("roller/proofs/generated/failed/time/%s", pubkey), metrics.ScrollRegistry),
|
||||
rollerProofsLastAssignedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/assigned/timestamp/%s", pubkey), metrics.ScrollRegistry),
|
||||
rollerProofsLastFinishedTimestampGauge: geth_metrics.GetOrRegisterGauge(fmt.Sprintf("roller/proofs/last/finished/timestamp/%s", pubkey), metrics.ScrollRegistry),
|
||||
}
|
||||
node = &rollerNode{
|
||||
Name: identity.Name,
|
||||
Type: identity.RollerType,
|
||||
Version: identity.Version,
|
||||
PublicKey: pubkey,
|
||||
TaskIDs: *taskIDs,
|
||||
taskChan: make(chan *message.TaskMsg, 4),
|
||||
metrics: rMs,
|
||||
}
|
||||
m.rollerPool.Set(pubkey, node)
|
||||
}
|
||||
roller := node.(*rollerNode)
|
||||
// avoid reconnection too frequently.
|
||||
if time.Since(roller.registerTime) < 60 {
|
||||
log.Warn("roller reconnect too frequently", "prover_name", identity.Name, "roller_type", identity.RollerType, "public key", pubkey)
|
||||
return nil, fmt.Errorf("roller reconnect too frequently")
|
||||
}
|
||||
// update register time and status
|
||||
roller.registerTime = time.Now()
|
||||
|
||||
return roller.taskChan, nil
|
||||
}
|
||||
|
||||
func (m *Manager) freeRoller(pk string) {
|
||||
m.rollerPool.Pop(pk)
|
||||
}
|
||||
|
||||
func (m *Manager) existTaskIDForRoller(pk string, id string) bool {
|
||||
if node, ok := m.rollerPool.Get(pk); ok {
|
||||
r := node.(*rollerNode)
|
||||
return r.TaskIDs.Has(id)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *Manager) freeTaskIDForRoller(pk string, id string) {
|
||||
if node, ok := m.rollerPool.Get(pk); ok {
|
||||
r := node.(*rollerNode)
|
||||
r.TaskIDs.Pop(id)
|
||||
}
|
||||
}
|
||||
|
||||
// GetNumberOfIdleRollers return the count of idle rollers.
|
||||
func (m *Manager) GetNumberOfIdleRollers(rollerType message.ProofType) (count int) {
|
||||
for _, pk := range m.rollerPool.Keys() {
|
||||
if val, ok := m.rollerPool.Get(pk); ok {
|
||||
r := val.(*rollerNode)
|
||||
if r.TaskIDs.Count() == 0 && r.Type == rollerType {
|
||||
count++
|
||||
}
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func (m *Manager) selectRoller(rollerType message.ProofType) *rollerNode {
|
||||
pubkeys := m.rollerPool.Keys()
|
||||
for len(pubkeys) > 0 {
|
||||
idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
|
||||
if val, ok := m.rollerPool.Get(pubkeys[idx.Int64()]); ok {
|
||||
r := val.(*rollerNode)
|
||||
if r.TaskIDs.Count() == 0 && r.Type == rollerType {
|
||||
return r
|
||||
}
|
||||
}
|
||||
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,24 +1,19 @@
|
||||
package coordinator_test
|
||||
package test
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/sync/errgroup"
|
||||
@@ -26,29 +21,23 @@ import (
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/coordinator"
|
||||
client2 "scroll-tech/coordinator/client"
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
"scroll-tech/coordinator/verifier"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/coordinator/client"
|
||||
"scroll-tech/coordinator/internal/config"
|
||||
"scroll-tech/coordinator/internal/controller/api"
|
||||
"scroll-tech/coordinator/internal/controller/cron"
|
||||
"scroll-tech/coordinator/internal/logic/rollermanager"
|
||||
"scroll-tech/coordinator/internal/orm"
|
||||
)
|
||||
|
||||
var (
|
||||
dbCfg *database.Config
|
||||
|
||||
base *docker.App
|
||||
|
||||
db *gorm.DB
|
||||
l2BlockOrm *orm.L2Block
|
||||
chunkOrm *orm.Chunk
|
||||
batchOrm *orm.Batch
|
||||
|
||||
wrappedBlock1 *types.WrappedBlock
|
||||
wrappedBlock2 *types.WrappedBlock
|
||||
chunk *types.Chunk
|
||||
@@ -66,46 +55,60 @@ func randomURL() string {
|
||||
}
|
||||
|
||||
func setEnv(t *testing.T) {
|
||||
base = docker.NewDockerApp()
|
||||
base.RunDBImage(t)
|
||||
|
||||
dbCfg = &database.Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
}
|
||||
|
||||
var err error
|
||||
db, err = database.InitDB(dbCfg)
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
batchOrm = orm.NewBatch(db)
|
||||
chunkOrm = orm.NewChunk(db)
|
||||
l2BlockOrm = orm.NewL2Block(db)
|
||||
|
||||
templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_02.json")
|
||||
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
|
||||
assert.NoError(t, err)
|
||||
wrappedBlock1 = &types.WrappedBlock{}
|
||||
err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
templateBlockTrace, err = os.ReadFile("../common/testdata/blockTrace_03.json")
|
||||
templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
|
||||
assert.NoError(t, err)
|
||||
wrappedBlock2 = &types.WrappedBlock{}
|
||||
err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
chunk = &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2}}
|
||||
}
|
||||
|
||||
func setupDB(t *testing.T) *gorm.DB {
|
||||
dbConf := database.Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
}
|
||||
db, err := database.InitDB(&dbConf)
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
return db
|
||||
}
|
||||
|
||||
func setupCoordinator(t *testing.T, rollersPerSession uint8, wsURL string, db *gorm.DB) (*http.Server, *gorm.DB, *cron.Collector) {
|
||||
if db == nil {
|
||||
db = setupDB(t)
|
||||
}
|
||||
conf := config.Config{
|
||||
RollersPerSession: rollersPerSession,
|
||||
Verifier: &config.VerifierConfig{MockMode: true},
|
||||
CollectionTime: 1,
|
||||
TokenTimeToLive: 5,
|
||||
MaxVerifierWorkers: 10,
|
||||
SessionAttempts: 2,
|
||||
}
|
||||
proofCollector := cron.NewCollector(context.Background(), db, &conf)
|
||||
tmpAPI := api.APIs(&conf, db)
|
||||
handler, _, err := utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], tmpAPI, flate.NoCompression)
|
||||
assert.NoError(t, err)
|
||||
rollermanager.InitRollerManager()
|
||||
|
||||
return handler, db, proofCollector
|
||||
}
|
||||
|
||||
func TestApis(t *testing.T) {
|
||||
// Set up the test environment.
|
||||
base = docker.NewDockerApp()
|
||||
setEnv(t)
|
||||
|
||||
t.Run("TestHandshake", testHandshake)
|
||||
@@ -114,10 +117,10 @@ func TestApis(t *testing.T) {
|
||||
t.Run("TestValidProof", testValidProof)
|
||||
t.Run("TestInvalidProof", testInvalidProof)
|
||||
t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
|
||||
t.Run("TestTimedoutProof", testTimedoutProof)
|
||||
t.Run("TestTimeoutProof", testTimeoutProof)
|
||||
t.Run("TestIdleRollerSelection", testIdleRollerSelection)
|
||||
t.Run("TestGracefulRestart", testGracefulRestart)
|
||||
t.Run("TestListRollers", testListRollers)
|
||||
// t.Run("TestListRollers", testListRollers)
|
||||
|
||||
// Teardown
|
||||
t.Cleanup(func() {
|
||||
@@ -128,10 +131,11 @@ func TestApis(t *testing.T) {
|
||||
func testHandshake(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
|
||||
handler, db, proofCollector := setupCoordinator(t, 1, wsURL, nil)
|
||||
defer func() {
|
||||
database.CloseDB(db)
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
proofCollector.Stop()
|
||||
}()
|
||||
|
||||
roller1 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeChunk)
|
||||
@@ -140,17 +144,18 @@ func testHandshake(t *testing.T) {
|
||||
roller2 := newMockRoller(t, "roller_test", wsURL, message.ProofTypeBatch)
|
||||
defer roller2.close()
|
||||
|
||||
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
}
|
||||
|
||||
func testFailedHandshake(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
|
||||
handler, db, proofCollector := setupCoordinator(t, 1, wsURL, nil)
|
||||
defer func() {
|
||||
database.CloseDB(db)
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
proofCollector.Stop()
|
||||
}()
|
||||
|
||||
// prepare
|
||||
@@ -160,7 +165,7 @@ func testFailedHandshake(t *testing.T) {
|
||||
|
||||
// Try to perform handshake without token
|
||||
// create a new ws connection
|
||||
client, err := client2.DialContext(ctx, wsURL)
|
||||
c, err := client.DialContext(ctx, wsURL)
|
||||
assert.NoError(t, err)
|
||||
// create private key
|
||||
privkey, err := crypto.GenerateKey()
|
||||
@@ -173,12 +178,12 @@ func testFailedHandshake(t *testing.T) {
|
||||
},
|
||||
}
|
||||
assert.NoError(t, authMsg.SignWithKey(privkey))
|
||||
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
|
||||
_, err = c.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
|
||||
assert.Error(t, err)
|
||||
|
||||
// Try to perform handshake with timeouted token
|
||||
// create a new ws connection
|
||||
client, err = client2.DialContext(ctx, wsURL)
|
||||
c, err = client.DialContext(ctx, wsURL)
|
||||
assert.NoError(t, err)
|
||||
// create private key
|
||||
privkey, err = crypto.GenerateKey()
|
||||
@@ -191,26 +196,26 @@ func testFailedHandshake(t *testing.T) {
|
||||
},
|
||||
}
|
||||
assert.NoError(t, authMsg.SignWithKey(privkey))
|
||||
token, err := client.RequestToken(ctx, authMsg)
|
||||
token, err := c.RequestToken(ctx, authMsg)
|
||||
assert.NoError(t, err)
|
||||
|
||||
authMsg.Identity.Token = token
|
||||
assert.NoError(t, authMsg.SignWithKey(privkey))
|
||||
|
||||
<-time.After(6 * time.Second)
|
||||
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
|
||||
_, err = c.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
|
||||
assert.Error(t, err)
|
||||
|
||||
assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 0, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
}
|
||||
|
||||
func testSeveralConnections(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
|
||||
handler, db, proofCollector := setupCoordinator(t, 1, wsURL, nil)
|
||||
defer func() {
|
||||
database.CloseDB(db)
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
proofCollector.Stop()
|
||||
}()
|
||||
|
||||
var (
|
||||
@@ -229,8 +234,8 @@ func testSeveralConnections(t *testing.T) {
|
||||
assert.NoError(t, eg.Wait())
|
||||
|
||||
// check roller's idle connections
|
||||
assert.Equal(t, batch/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, batch/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
assert.Equal(t, batch/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, batch/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
|
||||
// close connection
|
||||
for _, roller := range rollers {
|
||||
@@ -239,12 +244,12 @@ func testSeveralConnections(t *testing.T) {
|
||||
|
||||
var (
|
||||
tick = time.Tick(time.Second)
|
||||
tickStop = time.Tick(time.Second * 15)
|
||||
tickStop = time.Tick(time.Minute)
|
||||
)
|
||||
for {
|
||||
select {
|
||||
case <-tick:
|
||||
if rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
|
||||
if rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk) == 0 {
|
||||
return
|
||||
}
|
||||
case <-tickStop:
|
||||
@@ -255,12 +260,12 @@ func testSeveralConnections(t *testing.T) {
|
||||
}
|
||||
|
||||
func testValidProof(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
|
||||
handler, db, collector := setupCoordinator(t, 3, wsURL, nil)
|
||||
defer func() {
|
||||
database.CloseDB(db)
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
collector.Stop()
|
||||
}()
|
||||
|
||||
// create mock rollers.
|
||||
@@ -288,8 +293,12 @@ func testValidProof(t *testing.T) {
|
||||
roller.close()
|
||||
}
|
||||
}()
|
||||
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
batchOrm := orm.NewBatch(db)
|
||||
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
|
||||
assert.NoError(t, err)
|
||||
@@ -303,7 +312,7 @@ func testValidProof(t *testing.T) {
|
||||
// verify proof status
|
||||
var (
|
||||
tick = time.Tick(500 * time.Millisecond)
|
||||
tickStop = time.Tick(10 * time.Second)
|
||||
tickStop = time.Tick(time.Minute)
|
||||
)
|
||||
for {
|
||||
select {
|
||||
@@ -325,10 +334,11 @@ func testValidProof(t *testing.T) {
|
||||
func testInvalidProof(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
|
||||
handler, db, collector := setupCoordinator(t, 3, wsURL, nil)
|
||||
defer func() {
|
||||
database.CloseDB(db)
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
collector.Stop()
|
||||
}()
|
||||
|
||||
// create mock rollers.
|
||||
@@ -349,8 +359,12 @@ func testInvalidProof(t *testing.T) {
|
||||
roller.close()
|
||||
}
|
||||
}()
|
||||
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
batchOrm := orm.NewBatch(db)
|
||||
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
|
||||
assert.NoError(t, err)
|
||||
@@ -364,7 +378,7 @@ func testInvalidProof(t *testing.T) {
|
||||
// verify proof status
|
||||
var (
|
||||
tick = time.Tick(500 * time.Millisecond)
|
||||
tickStop = time.Tick(10 * time.Second)
|
||||
tickStop = time.Tick(time.Minute)
|
||||
)
|
||||
for {
|
||||
select {
|
||||
@@ -386,10 +400,11 @@ func testInvalidProof(t *testing.T) {
|
||||
func testProofGeneratedFailed(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, 3, wsURL, true)
|
||||
handler, db, collector := setupCoordinator(t, 3, wsURL, nil)
|
||||
defer func() {
|
||||
database.CloseDB(db)
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
collector.Stop()
|
||||
}()
|
||||
|
||||
// create mock rollers.
|
||||
@@ -410,8 +425,12 @@ func testProofGeneratedFailed(t *testing.T) {
|
||||
roller.close()
|
||||
}
|
||||
}()
|
||||
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 3, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
batchOrm := orm.NewBatch(db)
|
||||
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
|
||||
assert.NoError(t, err)
|
||||
@@ -425,7 +444,7 @@ func testProofGeneratedFailed(t *testing.T) {
|
||||
// verify proof status
|
||||
var (
|
||||
tick = time.Tick(500 * time.Millisecond)
|
||||
tickStop = time.Tick(10 * time.Second)
|
||||
tickStop = time.Tick(time.Minute)
|
||||
)
|
||||
for {
|
||||
select {
|
||||
@@ -444,13 +463,14 @@ func testProofGeneratedFailed(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testTimedoutProof(t *testing.T) {
|
||||
func testTimeoutProof(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
|
||||
handler, db, collector := setupCoordinator(t, 1, wsURL, nil)
|
||||
defer func() {
|
||||
database.CloseDB(db)
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
collector.Stop()
|
||||
}()
|
||||
|
||||
// create first chunk & batch mock roller, that will not send any proof.
|
||||
@@ -461,8 +481,12 @@ func testTimedoutProof(t *testing.T) {
|
||||
chunkRoller1.close()
|
||||
batchRoller1.close()
|
||||
}()
|
||||
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
batchOrm := orm.NewBatch(db)
|
||||
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
|
||||
assert.NoError(t, err)
|
||||
@@ -497,8 +521,8 @@ func testTimedoutProof(t *testing.T) {
|
||||
chunkRoller2.close()
|
||||
batchRoller2.close()
|
||||
}()
|
||||
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, 1, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
|
||||
// verify proof status, it should be verified now, because second roller sent valid proof
|
||||
ok = utils.TryTimes(200, func() bool {
|
||||
@@ -518,10 +542,11 @@ func testTimedoutProof(t *testing.T) {
|
||||
func testIdleRollerSelection(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
|
||||
handler, db, collector := setupCoordinator(t, 1, wsURL, nil)
|
||||
defer func() {
|
||||
database.CloseDB(db)
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
collector.Stop()
|
||||
}()
|
||||
|
||||
// create mock rollers.
|
||||
@@ -543,8 +568,12 @@ func testIdleRollerSelection(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
|
||||
assert.Equal(t, len(rollers)/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, len(rollers)/2, rollerManager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
assert.Equal(t, len(rollers)/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeChunk))
|
||||
assert.Equal(t, len(rollers)/2, rollermanager.Manager.GetNumberOfIdleRollers(message.ProofTypeBatch))
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
batchOrm := orm.NewBatch(db)
|
||||
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
|
||||
assert.NoError(t, err)
|
||||
@@ -580,7 +609,11 @@ func testIdleRollerSelection(t *testing.T) {
|
||||
func testGracefulRestart(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
|
||||
handler, db, collector := setupCoordinator(t, 1, wsURL, nil)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
batchOrm := orm.NewBatch(db)
|
||||
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
|
||||
assert.NoError(t, err)
|
||||
@@ -604,25 +637,22 @@ func testGracefulRestart(t *testing.T) {
|
||||
chunkRoller.close()
|
||||
batchRoller.close()
|
||||
|
||||
info, err := rollerManager.GetSessionInfo(dbChunk.Hash)
|
||||
provingStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
|
||||
assert.Equal(t, types.ProvingTaskAssigned, provingStatus)
|
||||
|
||||
// Close rollerManager and ws handler.
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
collector.Stop()
|
||||
|
||||
// Setup new coordinator and ws server.
|
||||
newRollerManager, newHandler := setupCoordinator(t, 1, wsURL, false)
|
||||
newHandler, newDb, newCollector := setupCoordinator(t, 1, wsURL, db)
|
||||
defer func() {
|
||||
newHandler.Shutdown(context.Background())
|
||||
newRollerManager.Stop()
|
||||
newCollector.Stop()
|
||||
database.CloseDB(newDb)
|
||||
}()
|
||||
|
||||
info, err = newRollerManager.GetSessionInfo(dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.ProvingTaskAssigned.String(), info.Status)
|
||||
|
||||
// at this point, roller haven't submitted
|
||||
status, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
|
||||
assert.NoError(t, err)
|
||||
@@ -662,220 +692,3 @@ func testGracefulRestart(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testListRollers(t *testing.T) {
|
||||
// Setup coordinator and ws server.
|
||||
wsURL := "ws://" + randomURL()
|
||||
rollerManager, handler := setupCoordinator(t, 1, wsURL, true)
|
||||
defer func() {
|
||||
handler.Shutdown(context.Background())
|
||||
rollerManager.Stop()
|
||||
}()
|
||||
|
||||
var names = []string{
|
||||
"roller_test_1",
|
||||
"roller_test_2",
|
||||
"roller_test_3",
|
||||
"roller_test_4",
|
||||
}
|
||||
|
||||
roller1 := newMockRoller(t, names[0], wsURL, message.ProofTypeChunk)
|
||||
roller2 := newMockRoller(t, names[1], wsURL, message.ProofTypeBatch)
|
||||
roller3 := newMockRoller(t, names[2], wsURL, message.ProofTypeChunk)
|
||||
roller4 := newMockRoller(t, names[3], wsURL, message.ProofTypeBatch)
|
||||
defer func() {
|
||||
roller1.close()
|
||||
roller2.close()
|
||||
}()
|
||||
|
||||
// test ListRollers API
|
||||
rollers, err := rollerManager.ListRollers()
|
||||
assert.NoError(t, err)
|
||||
var rollersName []string
|
||||
for _, roller := range rollers {
|
||||
rollersName = append(rollersName, roller.Name)
|
||||
}
|
||||
sort.Strings(rollersName)
|
||||
assert.True(t, reflect.DeepEqual(names, rollersName))
|
||||
|
||||
// test ListRollers if two rollers closed.
|
||||
roller3.close()
|
||||
roller4.close()
|
||||
// wait coordinator free completely
|
||||
time.Sleep(time.Second * 5)
|
||||
|
||||
rollers, err = rollerManager.ListRollers()
|
||||
assert.NoError(t, err)
|
||||
var newRollersName []string
|
||||
for _, roller := range rollers {
|
||||
newRollersName = append(newRollersName, roller.Name)
|
||||
}
|
||||
sort.Strings(newRollersName)
|
||||
assert.True(t, reflect.DeepEqual(names[:2], newRollersName))
|
||||
}
|
||||
|
||||
func setupCoordinator(t *testing.T, rollersPerSession uint8, wsURL string, resetDB bool) (rollerManager *coordinator.Manager, handler *http.Server) {
|
||||
db, err := database.InitDB(dbCfg)
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
if resetDB {
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
}
|
||||
|
||||
rollerManager, err = coordinator.New(context.Background(), &config.RollerManagerConfig{
|
||||
RollersPerSession: rollersPerSession,
|
||||
Verifier: &config.VerifierConfig{MockMode: true},
|
||||
CollectionTime: 1,
|
||||
TokenTimeToLive: 5,
|
||||
MaxVerifierWorkers: 10,
|
||||
SessionAttempts: 2,
|
||||
}, db)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, rollerManager.Start())
|
||||
|
||||
// start ws service
|
||||
handler, _, err = utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs(), flate.NoCompression)
|
||||
assert.NoError(t, err)
|
||||
|
||||
return rollerManager, handler
|
||||
}
|
||||
|
||||
type mockRoller struct {
|
||||
rollerName string
|
||||
privKey *ecdsa.PrivateKey
|
||||
proofType message.ProofType
|
||||
|
||||
wsURL string
|
||||
client *client2.Client
|
||||
|
||||
taskCh chan *message.TaskMsg
|
||||
taskCache sync.Map
|
||||
|
||||
sub ethereum.Subscription
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
func newMockRoller(t *testing.T, rollerName string, wsURL string, proofType message.ProofType) *mockRoller {
|
||||
privKey, err := crypto.GenerateKey()
|
||||
assert.NoError(t, err)
|
||||
|
||||
roller := &mockRoller{
|
||||
rollerName: rollerName,
|
||||
privKey: privKey,
|
||||
proofType: proofType,
|
||||
wsURL: wsURL,
|
||||
taskCh: make(chan *message.TaskMsg, 4),
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
roller.client, roller.sub, err = roller.connectToCoordinator()
|
||||
assert.NoError(t, err)
|
||||
|
||||
return roller
|
||||
}
|
||||
|
||||
// connectToCoordinator sets up a websocket client to connect to the roller manager.
|
||||
func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscription, error) {
|
||||
// Create connection.
|
||||
client, err := client2.Dial(r.wsURL)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// create a new ws connection
|
||||
authMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: r.rollerName,
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
RollerType: r.proofType,
|
||||
},
|
||||
}
|
||||
_ = authMsg.SignWithKey(r.privKey)
|
||||
|
||||
token, err := client.RequestToken(context.Background(), authMsg)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
authMsg.Identity.Token = token
|
||||
_ = authMsg.SignWithKey(r.privKey)
|
||||
|
||||
sub, err := client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return client, sub, nil
|
||||
}
|
||||
|
||||
func (r *mockRoller) releaseTasks() {
|
||||
r.taskCache.Range(func(key, value any) bool {
|
||||
r.taskCh <- value.(*message.TaskMsg)
|
||||
r.taskCache.Delete(key)
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
type proofStatus uint32
|
||||
|
||||
const (
|
||||
verifiedSuccess proofStatus = iota
|
||||
verifiedFailed
|
||||
generatedFailed
|
||||
)
|
||||
|
||||
// Wait for the proof task, after receiving the proof task, roller submits proof after proofTime secs.
|
||||
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, proofStatus proofStatus) {
|
||||
// simulating the case that the roller first disconnects and then reconnects to the coordinator
|
||||
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
|
||||
if reconnect {
|
||||
var err error
|
||||
r.client, r.sub, err = r.connectToCoordinator()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Release cached tasks.
|
||||
r.releaseTasks()
|
||||
|
||||
r.stopCh = make(chan struct{})
|
||||
go r.loop(t, r.client, proofTime, proofStatus, r.stopCh)
|
||||
}
|
||||
|
||||
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, proofStatus proofStatus, stopCh chan struct{}) {
|
||||
for {
|
||||
select {
|
||||
case task := <-r.taskCh:
|
||||
r.taskCache.Store(task.ID, task)
|
||||
// simulate proof time
|
||||
select {
|
||||
case <-time.After(proofTime):
|
||||
case <-stopCh:
|
||||
return
|
||||
}
|
||||
proof := &message.ProofMsg{
|
||||
ProofDetail: &message.ProofDetail{
|
||||
ID: task.ID,
|
||||
Type: r.proofType,
|
||||
Status: message.StatusOk,
|
||||
Proof: &message.AggProof{},
|
||||
},
|
||||
}
|
||||
if proofStatus == generatedFailed {
|
||||
proof.Status = message.StatusProofError
|
||||
} else if proofStatus == verifiedFailed {
|
||||
proof.ProofDetail.Proof.Proof = []byte(verifier.InvalidTestProof)
|
||||
}
|
||||
assert.NoError(t, proof.Sign(r.privKey))
|
||||
assert.NoError(t, client.SubmitProof(context.Background(), proof))
|
||||
case <-stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *mockRoller) close() {
|
||||
close(r.stopCh)
|
||||
r.sub.Unsubscribe()
|
||||
}
|
||||
155
coordinator/test/mock_roller.go
Normal file
155
coordinator/test/mock_roller.go
Normal file
@@ -0,0 +1,155 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
client2 "scroll-tech/coordinator/client"
|
||||
"scroll-tech/coordinator/internal/logic/verifier"
|
||||
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type proofStatus uint32
|
||||
|
||||
const (
|
||||
verifiedSuccess proofStatus = iota
|
||||
verifiedFailed
|
||||
generatedFailed
|
||||
)
|
||||
|
||||
type mockRoller struct {
|
||||
rollerName string
|
||||
privKey *ecdsa.PrivateKey
|
||||
proofType message.ProofType
|
||||
|
||||
wsURL string
|
||||
client *client2.Client
|
||||
|
||||
taskCh chan *message.TaskMsg
|
||||
taskCache sync.Map
|
||||
|
||||
sub ethereum.Subscription
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
func newMockRoller(t *testing.T, rollerName string, wsURL string, proofType message.ProofType) *mockRoller {
|
||||
privKey, err := crypto.GenerateKey()
|
||||
assert.NoError(t, err)
|
||||
|
||||
roller := &mockRoller{
|
||||
rollerName: rollerName,
|
||||
privKey: privKey,
|
||||
proofType: proofType,
|
||||
wsURL: wsURL,
|
||||
taskCh: make(chan *message.TaskMsg, 4),
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
roller.client, roller.sub, err = roller.connectToCoordinator()
|
||||
assert.NoError(t, err)
|
||||
|
||||
return roller
|
||||
}
|
||||
|
||||
// connectToCoordinator sets up a websocket client to connect to the roller manager.
|
||||
func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscription, error) {
|
||||
// Create connection.
|
||||
client, err := client2.Dial(r.wsURL)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// create a new ws connection
|
||||
authMsg := &message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Name: r.rollerName,
|
||||
Timestamp: uint32(time.Now().Unix()),
|
||||
},
|
||||
}
|
||||
_ = authMsg.SignWithKey(r.privKey)
|
||||
|
||||
token, err := client.RequestToken(context.Background(), authMsg)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
authMsg.Identity.Token = token
|
||||
_ = authMsg.SignWithKey(r.privKey)
|
||||
|
||||
sub, err := client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return client, sub, nil
|
||||
}
|
||||
|
||||
func (r *mockRoller) releaseTasks() {
|
||||
r.taskCache.Range(func(key, value any) bool {
|
||||
r.taskCh <- value.(*message.TaskMsg)
|
||||
r.taskCache.Delete(key)
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for the proof task, after receiving the proof task, roller submits proof after proofTime secs.
|
||||
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, proofStatus proofStatus) {
|
||||
// simulating the case that the roller first disconnects and then reconnects to the coordinator
|
||||
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
|
||||
if reconnect {
|
||||
var err error
|
||||
r.client, r.sub, err = r.connectToCoordinator()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Release cached tasks.
|
||||
r.releaseTasks()
|
||||
|
||||
r.stopCh = make(chan struct{})
|
||||
go r.loop(t, r.client, proofTime, proofStatus, r.stopCh)
|
||||
}
|
||||
|
||||
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, proofStatus proofStatus, stopCh chan struct{}) {
|
||||
for {
|
||||
select {
|
||||
case task := <-r.taskCh:
|
||||
r.taskCache.Store(task.ID, task)
|
||||
// simulate proof time
|
||||
select {
|
||||
case <-time.After(proofTime):
|
||||
case <-stopCh:
|
||||
return
|
||||
}
|
||||
proof := &message.ProofMsg{
|
||||
ProofDetail: &message.ProofDetail{
|
||||
ID: task.ID,
|
||||
Status: message.StatusOk,
|
||||
Proof: &message.AggProof{},
|
||||
},
|
||||
}
|
||||
if proofStatus == generatedFailed {
|
||||
proof.Status = message.StatusProofError
|
||||
} else if proofStatus == verifiedFailed {
|
||||
proof.ProofDetail.Proof.Proof = []byte(verifier.InvalidTestProof)
|
||||
}
|
||||
assert.NoError(t, proof.Sign(r.privKey))
|
||||
assert.NoError(t, client.SubmitProof(context.Background(), proof))
|
||||
case <-stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *mockRoller) close() {
|
||||
close(r.stopCh)
|
||||
r.sub.Unsubscribe()
|
||||
}
|
||||
545
coordinator/testdata/blockTrace_02.json
vendored
Normal file
545
coordinator/testdata/blockTrace_02.json
vendored
Normal file
@@ -0,0 +1,545 @@
|
||||
{
|
||||
"withdrawTrieRoot": "0x0000000000000000000000000000000000000000",
|
||||
"coinbase": {
|
||||
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"nonce": 2,
|
||||
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
"header": {
|
||||
"parentHash": "0xe17f08d25ef61a8ee12aa29704b901345a597f5e45a9a0f603ae0f70845b54dc",
|
||||
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
"miner": "0x0000000000000000000000000000000000000000",
|
||||
"stateRoot": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
|
||||
"transactionsRoot": "0x3057754c197f33e1fe799e996db6232b5257412feea05b3c1754738f0b33fe32",
|
||||
"receiptsRoot": "0xd95b673818fa493deec414e01e610d97ee287c9421c8eff4102b1647c1a184e4",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"difficulty": "0x2",
|
||||
"number": "0x2",
|
||||
"gasLimit": "0x355418d1e8184",
|
||||
"gasUsed": "0xa410",
|
||||
"timestamp": "0x63807b2a",
|
||||
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000004b54a94f0df14333e63c8a13dfe6097c1a08b5fd2c225a8dc0f199dae245aead55d6f774a980a0c925be407748d56a14106afda7ddc1dec342e7ee3b0d58a8df01",
|
||||
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"nonce": "0x0000000000000000",
|
||||
"baseFeePerGas": "0x1de9",
|
||||
"hash": "0xc7b6c7022c8386cdaf6fcd3d4f8d03dce257ae3664a072fdce511ecefce73ad0"
|
||||
},
|
||||
"transactions": [
|
||||
{
|
||||
"type": 0,
|
||||
"nonce": 0,
|
||||
"txHash": "0xb2febc1213baec968f6575789108e175273b8da8f412468098893084229f1542",
|
||||
"gas": 500000,
|
||||
"gasPrice": "0x3b9aec2e",
|
||||
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"to": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
|
||||
"chainId": "0xcf55",
|
||||
"value": "0x152d02c7e14af6000000",
|
||||
"data": "0x",
|
||||
"isCreate": false,
|
||||
"v": "0x19ece",
|
||||
"r": "0xab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514",
|
||||
"s": "0x34cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b"
|
||||
},
|
||||
{
|
||||
"type": 0,
|
||||
"nonce": 1,
|
||||
"txHash": "0xe6ac2ffc543d07f1e280912a2abe3aa659bf83773740681151297ada1bb211dd",
|
||||
"gas": 500000,
|
||||
"gasPrice": "0x3b9aec2e",
|
||||
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"to": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
|
||||
"chainId": "0xcf55",
|
||||
"value": "0x152d02c7e14af6000000",
|
||||
"data": "0x",
|
||||
"isCreate": false,
|
||||
"v": "0x19ece",
|
||||
"r": "0xf039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316",
|
||||
"s": "0x5a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1"
|
||||
}
|
||||
],
|
||||
"storageTrace": {
|
||||
"rootBefore": "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
|
||||
"rootAfter": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
|
||||
"proofs": {
|
||||
"0x01bae6BF68E9A03Fb2bc0615b1bf0d69ce9411eD": [
|
||||
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
|
||||
],
|
||||
"0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63": [
|
||||
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
|
||||
],
|
||||
"0xc0c4C8bAEA3f6Acb49b6E1fb9e2ADEcEeaCB0cA2": [
|
||||
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
|
||||
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
|
||||
]
|
||||
}
|
||||
},
|
||||
"executionResults": [
|
||||
{
|
||||
"gas": 21000,
|
||||
"failed": false,
|
||||
"returnValue": "",
|
||||
"from": {
|
||||
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"nonce": 0,
|
||||
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
"to": {
|
||||
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
|
||||
"nonce": 0,
|
||||
"balance": "0x0",
|
||||
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
},
|
||||
"accountAfter": [
|
||||
{
|
||||
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"nonce": 1,
|
||||
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
|
||||
"nonce": 0,
|
||||
"balance": "0x152d02c7e14af6000000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"nonce": 1,
|
||||
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"structLogs": []
|
||||
},
|
||||
{
|
||||
"gas": 21000,
|
||||
"failed": false,
|
||||
"returnValue": "",
|
||||
"from": {
|
||||
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"nonce": 1,
|
||||
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
"to": {
|
||||
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
|
||||
"nonce": 0,
|
||||
"balance": "0x0",
|
||||
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
},
|
||||
"accountAfter": [
|
||||
{
|
||||
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"nonce": 2,
|
||||
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
|
||||
"nonce": 0,
|
||||
"balance": "0x152d02c7e14af6000000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"nonce": 2,
|
||||
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"structLogs": []
|
||||
}
|
||||
],
|
||||
"mptwitness": [
|
||||
{
|
||||
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
|
||||
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
|
||||
"leaf": {
|
||||
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
|
||||
"leaf": {
|
||||
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
|
||||
"leaf": {
|
||||
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
|
||||
"leaf": {
|
||||
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"nonce": 2,
|
||||
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
|
||||
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
|
||||
"leaf": {
|
||||
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
|
||||
"leaf": {
|
||||
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
|
||||
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
|
||||
"leaf": {
|
||||
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
|
||||
"leaf": {
|
||||
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
|
||||
"leaf": {
|
||||
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
|
||||
"leaf": {
|
||||
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
{
|
||||
"nonce": 2,
|
||||
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"nonce": 2,
|
||||
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
|
||||
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
|
||||
"leaf": {
|
||||
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
|
||||
"leaf": {
|
||||
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
|
||||
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
|
||||
"leaf": {
|
||||
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
|
||||
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
|
||||
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
null,
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x152d02c7e14af6000000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
|
||||
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
|
||||
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x0",
|
||||
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
|
||||
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
|
||||
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
{
|
||||
"nonce": 2,
|
||||
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
},
|
||||
{
|
||||
"nonce": 2,
|
||||
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
},
|
||||
{
|
||||
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
|
||||
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
|
||||
"accountPath": [
|
||||
{
|
||||
"pathPart": "0x1",
|
||||
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
|
||||
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
|
||||
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pathPart": "0x3",
|
||||
"root": "0xaf16fd780a8c7616b95b20da69f4ff26e0253238e996f9516445d6d6bf92b725",
|
||||
"path": [
|
||||
{
|
||||
"value": "0x5bbe97e7e66485b203f9dfea64eb7fa7df06959b12cbde2beba14f8f91133a13",
|
||||
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
|
||||
},
|
||||
{
|
||||
"value": "0x2e591357b02ab3117c35ad94a4e1a724fdbd95d6463da1f6c8017e6d000ecf02",
|
||||
"sibling": "0x0000000000000000000000000000000000000000000000000000000000000000"
|
||||
},
|
||||
{
|
||||
"value": "0x794953bb5d8aa00f90383ff435ce2ea58e30e1da1061e69455c38496766ec10f",
|
||||
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
|
||||
}
|
||||
],
|
||||
"leaf": {
|
||||
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
|
||||
"sibling": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616"
|
||||
}
|
||||
}
|
||||
],
|
||||
"accountUpdate": [
|
||||
null,
|
||||
{
|
||||
"nonce": 0,
|
||||
"balance": "0x152d02c7e14af6000000",
|
||||
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
|
||||
}
|
||||
],
|
||||
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"statePath": [
|
||||
null,
|
||||
null
|
||||
],
|
||||
"stateUpdate": [
|
||||
null,
|
||||
null
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
12877
coordinator/testdata/blockTrace_03.json
vendored
Normal file
12877
coordinator/testdata/blockTrace_03.json
vendored
Normal file
File diff suppressed because one or more lines are too long
104
go.work.sum
104
go.work.sum
@@ -12,12 +12,17 @@ github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot
|
||||
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 h1:qoVeMsc9/fh/yhxVaA0obYjVH/oI/ihrOoMwsLS9KSA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.2/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 h1:E+m3SkZCN0Bf5q7YdTs5lSm2CYY3CK4spn5OmUIiQtk=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
|
||||
github.com/Azure/azure-storage-blob-go v0.7.0 h1:MuueVOYkufCxJw5YZzF842DY2MBsp+hLuh2apKY0mck=
|
||||
github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
@@ -40,14 +45,18 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
|
||||
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0=
|
||||
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
|
||||
github.com/ClickHouse/ch-go v0.55.0/go.mod h1:kQT2f+yp2p+sagQA/7kS6G3ukym+GQ5KAu1kuFAFDiU=
|
||||
github.com/ClickHouse/clickhouse-go/v2 v2.2.0 h1:dj00TDKY+xwuTJdbpspCSmTLFyWzRJerTHwaBxut1C0=
|
||||
github.com/ClickHouse/clickhouse-go/v2 v2.2.0/go.mod h1:8f2XZUi7XoeU+uPIytSi1cvx8fmJxi7vIgqpvYTF1+o=
|
||||
github.com/ClickHouse/clickhouse-go/v2 v2.9.1/go.mod h1:teXfZNM90iQ99Jnuht+dxQXCuhDZ8nvvMoTJOFrcmcg=
|
||||
github.com/CloudyKit/jet/v3 v3.0.0 h1:1PwO5w5VCtlUUl+KTOBsTGZlhjWkcybsGaAau52tOy8=
|
||||
github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
@@ -107,6 +116,7 @@ github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk
|
||||
github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
|
||||
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
|
||||
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
@@ -169,8 +179,10 @@ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
|
||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
|
||||
github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M=
|
||||
github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v23.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
|
||||
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
|
||||
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3vYNQgR+/ZIy0RmeUDZo7Y8zeQ=
|
||||
@@ -181,7 +193,12 @@ github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0Nkk
|
||||
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
|
||||
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
|
||||
github.com/elastic/go-sysinfo v1.8.1/go.mod h1:JfllUnzoQV/JRYymbH3dO1yggI3mV2oTKSXsDHM+uIM=
|
||||
github.com/elastic/go-sysinfo v1.10.1/go.mod h1:QElTrQ6akcnAVCRwdkZtoAkwuTv8UVM4+qe0hPxT4NU=
|
||||
github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU=
|
||||
github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=
|
||||
@@ -216,6 +233,8 @@ github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy
|
||||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=
|
||||
github.com/go-chi/chi/v5 v5.0.0 h1:DBPx88FjZJH3FsICfDAfIfnb7XxKIYVGG6lOPlhENAg=
|
||||
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
|
||||
github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5RsjvDFLoS5zkNBzIQMuVKUYQDmxU3N5XE=
|
||||
github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs=
|
||||
@@ -237,6 +256,7 @@ github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC
|
||||
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
|
||||
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
|
||||
github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk=
|
||||
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
@@ -245,6 +265,8 @@ github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0
|
||||
github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
|
||||
github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
|
||||
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
|
||||
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
|
||||
@@ -255,11 +277,13 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
|
||||
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219 h1:utua3L2IbQJmauC5IXdEA547bcoU5dozgQAfc8Onsg4=
|
||||
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38 h1:y0Wmhvml7cGnzPa9nocn/fMraMH/lMDdeG+rkx4VgYY=
|
||||
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
|
||||
github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64=
|
||||
@@ -270,6 +294,7 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
|
||||
github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
@@ -277,9 +302,11 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
|
||||
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
|
||||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
|
||||
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw=
|
||||
@@ -291,6 +318,7 @@ github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6Pb
|
||||
github.com/iden3/go-iden3-crypto v0.0.14/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
|
||||
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
|
||||
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
|
||||
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/influxdata/flux v0.65.1 h1:77BcVUCzvN5HMm8+j9PRBQ4iZcu98Dl4Y9rf+J5vhnc=
|
||||
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385 h1:ED4e5Cc3z5vSN2Tz2GkOHN7vs4Sxe2yds6CXvDnvZFE=
|
||||
@@ -309,21 +337,26 @@ github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys=
|
||||
github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI=
|
||||
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
|
||||
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y=
|
||||
github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w=
|
||||
github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
|
||||
github.com/jackc/pgx/v4 v4.17.0 h1:Hsx+baY8/zU2WtPLQyZi8WbecgcsWEeyoK1jvg/WgIo=
|
||||
github.com/jackc/pgx/v4 v4.17.0/go.mod h1:Gd6RmOhtFLTu8cp/Fhq4kP195KrshxYJH3oW8AWJ1pw=
|
||||
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
|
||||
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jackc/pgx/v5 v5.3.1/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
|
||||
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
|
||||
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
|
||||
github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
|
||||
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
|
||||
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak=
|
||||
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI=
|
||||
@@ -345,6 +378,7 @@ github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHz
|
||||
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfMuZT83xIwfPDxEI2OHu6xUmJMFE=
|
||||
github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
|
||||
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
@@ -368,11 +402,15 @@ github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HN
|
||||
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U=
|
||||
github.com/mattn/goveralls v0.0.2 h1:7eJB6EqsPhRVxvwEXGnqdO2sJI0PTsrWoTMXEk9/OQc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/microsoft/go-mssqldb v0.21.0/go.mod h1:+4wZTUnz/SV6nffv+RRRB/ss8jPng5Sho2SmM1l2ts4=
|
||||
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
|
||||
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
|
||||
github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
|
||||
github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
|
||||
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=
|
||||
@@ -392,13 +430,19 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
|
||||
github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow=
|
||||
github.com/onsi/ginkgo/v2 v2.8.1 h1:xFTEVwOFa1D/Ty24Ws1npBWkDYEV9BqZrsDxVrVkrrU=
|
||||
github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
|
||||
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
|
||||
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
|
||||
github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
|
||||
github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY=
|
||||
github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM=
|
||||
github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs=
|
||||
github.com/paulmach/orb v0.7.1 h1:Zha++Z5OX/l168sqHK3k4z18LDvr+YAO/VjK0ReQ9rU=
|
||||
github.com/paulmach/orb v0.7.1/go.mod h1:FWRlTgl88VI1RBx/MkrwWDRhQ96ctqMCh8boXhmqB/A=
|
||||
github.com/paulmach/orb v0.9.2/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU=
|
||||
github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
|
||||
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
|
||||
@@ -406,6 +450,9 @@ github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
|
||||
github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
|
||||
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
|
||||
github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5 h1:tFwafIEMf0B7NlcxV/zJ6leBIa81D3hgGSgsE5hCkOQ=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
|
||||
@@ -413,9 +460,11 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:Om
|
||||
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=
|
||||
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
|
||||
github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
|
||||
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
|
||||
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -430,6 +479,7 @@ github.com/scroll-tech/go-ethereum v1.10.14-0.20230306131930-03b4de32b78b/go.mod
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04/go.mod h1:jH8c08L9K8Hieaf0r/ur2P/cpesn4dFhmLm2Mmoi8kI=
|
||||
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/scroll-tech/zktrie v0.5.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
|
||||
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
|
||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
@@ -454,7 +504,7 @@ github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
|
||||
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
|
||||
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
|
||||
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
|
||||
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
|
||||
@@ -469,32 +519,42 @@ github.com/valyala/fasthttp v1.40.0 h1:CRq/00MfruPGFLTQKY8b+8SfdK60TxNztjRMnH0t1
|
||||
github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I=
|
||||
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
|
||||
github.com/vertica/vertica-sql-go v1.3.2/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4=
|
||||
github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
|
||||
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6 h1:YdYsPAZ2pC6Tow/nPZOPQ96O3hm/ToAkGsPLzedXERk=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
|
||||
github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=
|
||||
github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
|
||||
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
|
||||
go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg=
|
||||
go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M=
|
||||
go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
|
||||
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
|
||||
go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw=
|
||||
go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo=
|
||||
go.opentelemetry.io/otel v1.15.1/go.mod h1:mHHGEHVDLal6YrKMmk9LqC4a3sF5g+fHfrttQIB1NTc=
|
||||
go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc=
|
||||
go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo=
|
||||
go.opentelemetry.io/otel/trace v1.15.1/go.mod h1:IWdQG/5N1x7f6YUlmdLeJvH9yxtuJAfc4VW5Agv9r/8=
|
||||
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
|
||||
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg=
|
||||
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
|
||||
@@ -503,38 +563,33 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNT
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
|
||||
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
|
||||
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220224120231-95c6836cb0e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
|
||||
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
|
||||
@@ -542,11 +597,9 @@ golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
||||
gonum.org/v1/gonum v0.6.0 h1:DJy6UzXbahnGUf1ujUNkh/NEtK14qMo2nvlBPs4U5yw=
|
||||
@@ -561,26 +614,37 @@ google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 h1:R1r5J0u6Cx+RNl/
|
||||
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
|
||||
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
|
||||
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
|
||||
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
gotest.tools v1.4.0/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
|
||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
|
||||
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
|
||||
lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
|
||||
modernc.org/cc/v3 v3.36.1/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
|
||||
modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
|
||||
modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws=
|
||||
modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
|
||||
modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
|
||||
modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY=
|
||||
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
|
||||
modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
|
||||
modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
|
||||
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
|
||||
modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
|
||||
modernc.org/sqlite v1.22.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk=
|
||||
modernc.org/strutil v1.1.2/go.mod h1:OYajnUAcI/MX+XD/Wx7v1bbdvcQSvxgtb0gC+u3d3eg=
|
||||
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
|
||||
modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
|
||||
moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE=
|
||||
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
|
||||
rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
|
||||
|
||||
@@ -33,7 +33,7 @@ var (
|
||||
func TestMain(m *testing.M) {
|
||||
base = docker.NewDockerApp()
|
||||
bridgeApp = bcmd.NewBridgeApp(base, "../../bridge/conf/config.json")
|
||||
coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/config.json")
|
||||
coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json")
|
||||
rollerApp = rapp.NewRollerApp(base, "../../roller/config.json", coordinatorApp.WSEndpoint())
|
||||
m.Run()
|
||||
bridgeApp.Free()
|
||||
|
||||
Reference in New Issue
Block a user