Compare commits

...

2 Commits

Author SHA1 Message Date
georgehao
a98a2ff4b5 feat(coordinator): fix login replay attack (#723)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2023-08-05 10:58:59 +02:00
colin
2a0c7ae6b5 refactor(coordinator & prover): RESTful API (#696)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-08-05 08:27:07 +02:00
69 changed files with 2736 additions and 15674 deletions

View File

@@ -83,7 +83,7 @@ func (c *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose || c.openLog {
fmt.Printf("%s:\n\t%v", c.name, out)
} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
} else if strings.Contains(strings.ToLower(out), "error") || strings.Contains(strings.ToLower(out), "warning") || strings.Contains(strings.ToLower(out), "info") {
fmt.Printf("%s:\n\t%v", c.name, out)
}
go c.checkFuncs.IterCb(func(_ string, value interface{}) {

20
common/types/errno.go Normal file
View File

@@ -0,0 +1,20 @@
package types
const (
// Success shows OK.
Success = 0
// ErrJWTCommonErr jwt common error
ErrJWTCommonErr = 50000
// ErrJWTTokenExpired jwt token expired
ErrJWTTokenExpired = 50001
// ErrCoordinatorParameterInvalidNo is invalid params
ErrCoordinatorParameterInvalidNo = 20001
// ErrCoordinatorGetTaskFailure is getting prover task error
ErrCoordinatorGetTaskFailure = 20002
// ErrCoordinatorHandleZkProofFailure is handle submit proof error
ErrCoordinatorHandleZkProofFailure = 20003
// ErrCoordinatorEmptyProofData get empty proof data
ErrCoordinatorEmptyProofData = 20004
)

View File

@@ -58,15 +58,12 @@ type AuthMsg struct {
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// Prover name
Name string `json:"name"`
// Prover ProverType
ProverType ProofType `json:"prover_type,omitempty"`
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion version.
// curl -sL https://api.github.com/repos/scroll-tech/scroll-prover/commits | jq -r ".[0].sha"
Version string `json:"version"`
// Random unique token generated by manager
Token string `json:"token"`
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
// GenerateToken generates token

View File

@@ -15,9 +15,9 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
authMsg := &AuthMsg{
Identity: &Identity{
Name: "testName",
Version: "testVersion",
Token: "testToken",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzgxNzUsIm9yaWdfaWF0IjoxNjkxMDM0NTc1fQ.HybBMsEJFhyZqtIa2iVcHUP7CEFttf708jmTMAImAWA",
ProverName: "test",
ProverVersion: "v1.0.0",
},
}
assert.NoError(t, authMsg.SignWithKey(privkey))
@@ -46,15 +46,15 @@ func TestGenerateToken(t *testing.T) {
func TestIdentityHash(t *testing.T) {
identity := &Identity{
Name: "testName",
ProverType: ProofTypeChunk,
Version: "testVersion",
Token: "testToken",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzM0MTksIm9yaWdfaWF0IjoxNjkxMDI5ODE5fQ.EhkLZsj__rNPVC3ZDYBtvdh0nB8mmM_Hl82hObaIWOs",
ProverName: "test",
ProverVersion: "v1.0.0",
}
hash, err := identity.Hash()
assert.NoError(t, err)
expectedHash := "c0411a19531fb8c6133b2bae91f361c14e65f2d318aef72b83519e6061cad001"
expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.0.43"
var tag = "v4.1.1"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -37,7 +37,7 @@ make lint
## Configure
The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManagerConfig` in [`config/config.go`](config/config.go) for more details.
The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManager` in [`config/config.go`](config/config.go) for more details.
## Start

View File

@@ -1,51 +0,0 @@
package client
import (
"context"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/types/message"
)
// Client defines typed wrappers for the Ethereum RPC API.
type Client struct {
client *rpc.Client
}
// Dial connects a client to the given URL.
func Dial(rawurl string) (*Client, error) {
return DialContext(context.Background(), rawurl)
}
// DialContext connects a client to the given URL with a given context.
func DialContext(ctx context.Context, rawurl string) (*Client, error) {
c, err := rpc.DialContext(ctx, rawurl)
if err != nil {
return nil, err
}
return NewClient(c), nil
}
// NewClient creates a client that uses the given RPC client.
func NewClient(c *rpc.Client) *Client {
return &Client{client: c}
}
// RequestToken generates token for prover
func (c *Client) RequestToken(ctx context.Context, authMsg *message.AuthMsg) (string, error) {
var token string
err := c.client.CallContext(ctx, &token, "prover_requestToken", authMsg)
return token, err
}
// RegisterAndSubscribe subscribe prover and register, verified by sign data.
func (c *Client) RegisterAndSubscribe(ctx context.Context, taskCh chan *message.TaskMsg, authMsg *message.AuthMsg) (ethereum.Subscription, error) {
return c.client.Subscribe(ctx, "prover", taskCh, "register", authMsg)
}
// SubmitProof get proof from prover.
func (c *Client) SubmitProof(ctx context.Context, proof *message.ProofMsg) error {
return c.client.CallContext(ctx, nil, "prover_submitProof", proof)
}

View File

@@ -2,13 +2,17 @@ package app
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"os/signal"
"time"
// enable the pprof
_ "net/http/pprof"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -20,7 +24,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/route"
)
var app *cli.App
@@ -49,15 +53,12 @@ func action(ctx *cli.Context) error {
}
subCtx, cancel := context.WithCancel(ctx.Context)
db, err := database.InitDB(cfg.DBConfig)
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
proofCollector := cron.NewCollector(subCtx, db, cfg)
provermanager.InitProverManager(db)
defer func() {
proofCollector.Stop()
cancel()
@@ -66,34 +67,24 @@ func action(ctx *cli.Context) error {
}
}()
router := gin.Default()
api.InitController(cfg, db)
route.Route(router, cfg)
port := ctx.String(httpPortFlag.Name)
srv := &http.Server{
Addr: fmt.Sprintf(":%s", port),
Handler: router,
ReadHeaderTimeout: time.Minute,
}
// Start metrics server.
metrics.Serve(subCtx, ctx)
apis := api.RegisterAPIs(cfg, db)
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(fmt.Sprintf("%s:%d", ctx.String(httpListenAddrFlag.Name), ctx.Int(httpPortFlag.Name)), apis)
if err != nil {
log.Crit("Could not start RPC api", "error", err)
go func() {
if runServerErr := srv.ListenAndServe(); err != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
log.Crit("run coordinator http server failure", "error", runServerErr)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
// Register api and start ws service.
if ctx.Bool(wsEnabledFlag.Name) {
handler, addr, err := utils.StartWSEndpoint(fmt.Sprintf("%s:%d", ctx.String(wsListenAddrFlag.Name), ctx.Int(wsPortFlag.Name)), apis, cfg.ProverManagerConfig.CompressionLevel)
if err != nil {
log.Crit("Could not start WS api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("WS endpoint closed", "url", fmt.Sprintf("ws://%v/", addr))
}()
log.Info("WS endpoint opened", "url", fmt.Sprintf("ws://%v/", addr))
}
}()
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
@@ -101,7 +92,17 @@ func action(ctx *cli.Context) error {
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
log.Info("start shutdown coordinator server ...")
closeCtx, cancelExit := context.WithTimeout(context.Background(), 5*time.Second)
defer cancelExit()
if err = srv.Shutdown(closeCtx); err != nil {
log.Warn("shutdown coordinator server failure", "error", err)
return nil
}
<-closeCtx.Done()
log.Info("coordinator server exiting success")
return nil
}

View File

@@ -18,7 +18,7 @@ import (
)
var (
wsStartPort int64 = 40000
httpStartPort int64 = 40000
)
// CoordinatorApp coordinator-test client manager.
@@ -29,7 +29,7 @@ type CoordinatorApp struct {
originFile string
coordinatorFile string
WSPort int64
HTTPPort int64
args []string
docker.AppAPI
@@ -39,13 +39,13 @@ type CoordinatorApp struct {
func NewCoordinatorApp(base *docker.App, file string) *CoordinatorApp {
coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", base.Timestamp)
port, _ := rand.Int(rand.Reader, big.NewInt(2000))
wsPort := port.Int64() + wsStartPort
httpPort := port.Int64() + httpStartPort
coordinatorApp := &CoordinatorApp{
base: base,
originFile: file,
coordinatorFile: coordinatorFile,
WSPort: wsPort,
args: []string{"--log.debug", "--config", coordinatorFile, "--ws", "--ws.port", strconv.Itoa(int(wsPort))},
HTTPPort: httpPort,
args: []string{"--log.debug", "--config", coordinatorFile, "--http", "--http.port", strconv.Itoa(int(httpPort))},
}
if err := coordinatorApp.MockConfig(true); err != nil {
panic(err)
@@ -67,9 +67,9 @@ func (c *CoordinatorApp) Free() {
_ = os.Remove(c.coordinatorFile)
}
// WSEndpoint returns ws endpoint.
func (c *CoordinatorApp) WSEndpoint() string {
return fmt.Sprintf("ws://localhost:%d", c.WSPort)
// HTTPEndpoint returns http endpoint.
func (c *CoordinatorApp) HTTPEndpoint() string {
return fmt.Sprintf("http://localhost:%d", c.HTTPPort)
}
// MockConfig creates a new coordinator config.
@@ -80,14 +80,15 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
return err
}
// Reset prover manager config for manager test cases.
cfg.ProverManagerConfig = &coordinatorConfig.ProverManagerConfig{
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 1,
cfg.ProverManager = &coordinatorConfig.ProverManager{
ProversPerSession: 1,
Verifier: &coordinatorConfig.VerifierConfig{MockMode: true},
CollectionTimeSec: 60,
SessionAttempts: 10,
MaxVerifierWorkers: 4,
}
cfg.DBConfig.DSN = base.DBImg.Endpoint()
cfg.L2Config.ChainID = 111
cfg.DB.DSN = base.DBImg.Endpoint()
cfg.L2.ChainID = 111
c.Config = cfg
if !store {

View File

@@ -1,25 +1,27 @@
{
"prover_manager_config": {
"compression_level": 9,
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 2,
"collection_time": 180,
"token_time_to_live": 60,
"session_attempts": 5,
"collection_time_sec": 180,
"verifier": {
"mock_mode": true,
"params_path": "",
"assets_path": ""
},
"max_verifier_workers": 10,
"order_session": "ASC"
"max_verifier_workers": 4
},
"db_config": {
"db": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"l2_config": {
"l2": {
"chain_id": 111
},
"auth": {
"secret": "prover secret key",
"challenge_expire_duration_sec": 3600,
"login_expire_duration_sec": 10
}
}

View File

@@ -3,52 +3,67 @@ module scroll-tech/coordinator
go 1.19
require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/orcaman/concurrent-map v1.0.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/appleboy/gin-jwt/v2 v2.9.1
github.com/gin-gonic/gin v1.9.1
github.com/go-resty/resty/v2 v2.7.0
github.com/mitchellh/mapstructure v1.5.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
github.com/shopspring/decimal v1.3.1
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.3.0
gorm.io/gorm v1.25.2
)
require (
github.com/bytedance/sonic v1.9.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
golang.org/x/arch v0.4.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/text v0.11.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
)
require (
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.6.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/smartystreets/assertions v1.13.1 // indirect
github.com/smartystreets/goconvey v1.8.0
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/sys v0.10.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -1,7 +1,9 @@
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/appleboy/gin-jwt/v2 v2.9.1 h1:l29et8iLW6omcHltsOP6LLk4s3v4g2FbFs0koxGWVZs=
github.com/appleboy/gin-jwt/v2 v2.9.1/go.mod h1:jwcPZJ92uoC9nOUTOKWoN/f6JZOgMSKlFSHw5/FrRUk=
github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4=
github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -11,29 +13,62 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
@@ -44,10 +79,15 @@ github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkr
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -55,16 +95,30 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -73,6 +127,8 @@ github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
@@ -86,51 +142,117 @@ github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKl
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU=
github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w=
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=

View File

@@ -2,50 +2,46 @@ package config
import (
"encoding/json"
"errors"
"os"
"path/filepath"
"strings"
"scroll-tech/common/database"
)
const (
defaultNumberOfVerifierWorkers = 10
defaultNumberOfSessionRetryAttempts = 2
)
// ProverManagerConfig loads sequencer configuration items.
type ProverManagerConfig struct {
CompressionLevel int `json:"compression_level,omitempty"`
// asc or desc (default: asc)
OrderSession string `json:"order_session,omitempty"`
// ProverManager loads sequencer configuration items.
type ProverManager struct {
// The amount of provers to pick per proof generation session.
ProversPerSession uint8 `json:"provers_per_session"`
// Number of attempts that a session can be retried if previous attempts failed.
// Currently we only consider proving timeout as failure here.
SessionAttempts uint8 `json:"session_attempts,omitempty"`
SessionAttempts uint8 `json:"session_attempts"`
// Zk verifier config.
Verifier *VerifierConfig `json:"verifier,omitempty"`
// Proof collection time (in minutes).
CollectionTime int `json:"collection_time"`
// Token time to live (in seconds)
TokenTimeToLive int `json:"token_time_to_live"`
Verifier *VerifierConfig `json:"verifier"`
// Proof collection time (in seconds).
CollectionTimeSec int `json:"collection_time_sec"`
// Max number of workers in verifier worker pool
MaxVerifierWorkers int `json:"max_verifier_workers,omitempty"`
MaxVerifierWorkers int `json:"max_verifier_workers"`
}
// L2Config loads l2geth configuration items.
type L2Config struct {
// L2 loads l2geth configuration items.
type L2 struct {
// l2geth chain_id.
ChainID uint64 `json:"chain_id"`
}
// Auth provides the auth of prover-stats-api
type Auth struct {
Secret string `json:"secret"`
ChallengeExpireDurationSec int `json:"challenge_expire_duration_sec"`
LoginExpireDurationSec int `json:"token_expire_duration_sec"`
}
// Config load configuration items.
type Config struct {
ProverManagerConfig *ProverManagerConfig `json:"prover_manager_config"`
DBConfig *database.Config `json:"db_config"`
L2Config *L2Config `json:"l2_config"`
ProverManager *ProverManager `json:"prover_manager"`
DB *database.Config `json:"db"`
L2 *L2 `json:"l2"`
Auth *Auth `json:"auth"`
}
// VerifierConfig load zk verifier config.
@@ -68,19 +64,5 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
// Check prover's order session
order := strings.ToUpper(cfg.ProverManagerConfig.OrderSession)
if len(order) > 0 && !(order == "ASC" || order == "DESC") {
return nil, errors.New("prover config's order session is invalid")
}
cfg.ProverManagerConfig.OrderSession = order
if cfg.ProverManagerConfig.MaxVerifierWorkers == 0 {
cfg.ProverManagerConfig.MaxVerifierWorkers = defaultNumberOfVerifierWorkers
}
if cfg.ProverManagerConfig.SessionAttempts == 0 {
cfg.ProverManagerConfig.SessionAttempts = defaultNumberOfSessionRetryAttempts
}
return cfg, nil
}

View File

@@ -12,29 +12,31 @@ import (
func TestConfig(t *testing.T) {
configTemplate := `{
"prover_manager_config": {
"compression_level": 9,
"prover_manager": {
"provers_per_session": 1,
"session_attempts": %d,
"collection_time": 180,
"token_time_to_live": 60,
"session_attempts": 5,
"collection_time_sec": 180,
"verifier": {
"mock_mode": true,
"params_path": "",
"agg_vk_path": ""
},
"max_verifier_workers": %d,
"order_session": "%s"
"max_verifier_workers": 4
},
"db_config": {
"db": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"l2_config": {
"l2": {
"chain_id": 111
}
},
"auth": {
"secret": "prover secret key",
"challenge_expire_duration_sec": 3600,
"login_expire_duration_sec": 3600
}
}`
t.Run("Success Case", func(t *testing.T) {
@@ -44,8 +46,7 @@ func TestConfig(t *testing.T) {
assert.NoError(t, tmpFile.Close())
assert.NoError(t, os.Remove(tmpFile.Name()))
}()
config := fmt.Sprintf(configTemplate, defaultNumberOfSessionRetryAttempts, defaultNumberOfVerifierWorkers, "ASC")
_, err = tmpFile.WriteString(config)
_, err = tmpFile.WriteString(configTemplate)
assert.NoError(t, err)
cfg, err := NewConfig(tmpFile.Name())
@@ -86,52 +87,4 @@ func TestConfig(t *testing.T) {
_, err = NewConfig(tmpFile.Name())
assert.Error(t, err)
})
t.Run("Invalid Order Session", func(t *testing.T) {
tmpFile, err := os.CreateTemp("", "example")
assert.NoError(t, err)
defer func() {
assert.NoError(t, tmpFile.Close())
assert.NoError(t, os.Remove(tmpFile.Name()))
}()
config := fmt.Sprintf(configTemplate, defaultNumberOfSessionRetryAttempts, defaultNumberOfVerifierWorkers, "INVALID")
_, err = tmpFile.WriteString(config)
assert.NoError(t, err)
_, err = NewConfig(tmpFile.Name())
assert.Error(t, err)
assert.Contains(t, err.Error(), "prover config's order session is invalid")
})
t.Run("Default MaxVerifierWorkers", func(t *testing.T) {
tmpFile, err := os.CreateTemp("", "example")
assert.NoError(t, err)
defer func() {
assert.NoError(t, tmpFile.Close())
assert.NoError(t, os.Remove(tmpFile.Name()))
}()
config := fmt.Sprintf(configTemplate, defaultNumberOfSessionRetryAttempts, 0, "ASC")
_, err = tmpFile.WriteString(config)
assert.NoError(t, err)
cfg, err := NewConfig(tmpFile.Name())
assert.NoError(t, err)
assert.Equal(t, defaultNumberOfVerifierWorkers, cfg.ProverManagerConfig.MaxVerifierWorkers)
})
t.Run("Default SessionAttempts", func(t *testing.T) {
tmpFile, err := os.CreateTemp("", "example")
assert.NoError(t, err)
defer func() {
assert.NoError(t, tmpFile.Close())
assert.NoError(t, os.Remove(tmpFile.Name()))
}()
config := fmt.Sprintf(configTemplate, 0, defaultNumberOfVerifierWorkers, "ASC")
_, err = tmpFile.WriteString(config)
assert.NoError(t, err)
cfg, err := NewConfig(tmpFile.Name())
assert.NoError(t, err)
assert.Equal(t, uint8(defaultNumberOfSessionRetryAttempts), cfg.ProverManagerConfig.SessionAttempts)
})
}

View File

@@ -0,0 +1,93 @@
package api
import (
"fmt"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/logic/auth"
"scroll-tech/coordinator/internal/types"
)
// AuthController handles the prover login API. It validates login
// requests and records consumed challenges via loginLogic.
type AuthController struct {
	// loginLogic persists used challenge strings for replay protection.
	loginLogic *auth.LoginLogic
}

// NewAuthController returns a new AuthController backed by the given database.
func NewAuthController(db *gorm.DB) *AuthController {
	return &AuthController{
		loginLogic: auth.NewLoginLogic(db),
	}
}
// Login is the api controller for prover login. The challenge carried in
// the request body must equal the bearer token of the Authorization header,
// and must never have been used before (replay protection).
func (a *AuthController) Login(c *gin.Context) (interface{}, error) {
	var login types.LoginParameter
	if err := c.ShouldBind(&login); err != nil {
		return "", fmt.Errorf("missing the public_key, err:%w", err)
	}

	// The Authorization header is guaranteed to exist at this point: requests
	// without it are intercepted earlier by the jwt middleware. Its bearer
	// token must be exactly the challenge string being signed.
	authHeader := c.GetHeader("Authorization")
	expected := "Bearer " + login.Message.Challenge
	if authHeader != expected {
		return "", fmt.Errorf("check challenge failure for the not equal challenge string")
	}

	// Record the challenge as consumed; if it was already stored, this login
	// is a replay and the insert fails.
	if err := a.loginLogic.InsertChallengeString(c, login.Message.Challenge); err != nil {
		return "", fmt.Errorf("login insert challenge string failure:%w", err)
	}
	return login, nil
}
// PayloadFunc builds the jwt.MapClaims embedded in the issued token: the
// prover's recovered public key, its name, and its version. An empty claims
// map is returned when data is not a LoginParameter or when the public key
// cannot be recovered from the signature.
func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
	param, ok := data.(types.LoginParameter)
	if !ok {
		return jwt.MapClaims{}
	}

	// Rebuild the signed message so the prover's public key can be
	// recovered from its signature.
	authMsg := message.AuthMsg{
		Identity: &message.Identity{
			Challenge:     param.Message.Challenge,
			ProverName:    param.Message.ProverName,
			ProverVersion: param.Message.ProverVersion,
		},
		Signature: param.Signature,
	}

	publicKey, err := authMsg.PublicKey()
	if err != nil {
		return jwt.MapClaims{}
	}

	claims := jwt.MapClaims{}
	claims[types.PublicKey] = publicKey
	claims[types.ProverName] = param.Message.ProverName
	claims[types.ProverVersion] = param.Message.ProverVersion
	return claims
}
// IdentityHandler copies the prover identity claims (name, public key,
// version) out of the verified jwt token into the gin context, so later
// handlers can read them. It always returns nil as the identity value.
func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
	claims := jwt.ExtractClaims(c)
	// Same order as the claims were set: name, public key, version.
	for _, key := range []string{types.ProverName, types.PublicKey, types.ProverVersion} {
		if value, ok := claims[key]; ok {
			c.Set(key, value)
		}
	}
	return nil
}

View File

@@ -0,0 +1,32 @@
package api
import (
"sync"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/config"
)
var (
	// GetTask is the shared controller serving prover get-task requests.
	GetTask *GetTaskController
	// SubmitProof is the shared controller accepting submitted proofs.
	SubmitProof *SubmitProofController
	// HealthCheck is the shared controller for the health-check endpoint.
	HealthCheck *HealthCheckController
	// Auth is the shared controller handling prover login.
	Auth *AuthController

	// initControllerOnce guards InitController so the controllers are
	// constructed exactly once per process.
	initControllerOnce sync.Once
)

// InitController constructs the package-level controllers from the given
// config and database handle. Calling it again after the first call is a
// no-op.
func InitController(cfg *config.Config, db *gorm.DB) {
	initControllerOnce.Do(func() {
		Auth = NewAuthController(db)
		HealthCheck = NewHealthCheckController()
		GetTask = NewGetTaskController(cfg, db)
		SubmitProof = NewSubmitProofController(cfg, db)
	})
}

View File

@@ -0,0 +1,86 @@
package api
import (
"fmt"
"math/rand"
"github.com/gin-gonic/gin"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provertask"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// GetTaskController is the get prover task api controller. It dispatches
// get-task requests to the prover task implementation that matches the
// requested proof type.
type GetTaskController struct {
	// proverTasks maps each supported proof type to its task assigner.
	proverTasks map[message.ProofType]provertask.ProverTask
}

// NewGetTaskController creates a get prover task controller wired with the
// chunk and batch prover task implementations.
func NewGetTaskController(cfg *config.Config, db *gorm.DB) *GetTaskController {
	return &GetTaskController{
		proverTasks: map[message.ProofType]provertask.ProverTask{
			message.ProofTypeChunk: provertask.NewChunkProverTask(cfg, db),
			message.ProofTypeBatch: provertask.NewBatchProverTask(cfg, db),
		},
	}
}
// GetTasks hands out an assigned chunk or batch proving task to the caller.
// Every failure path is rendered as a JSON error response carrying the
// matching coordinator error code.
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
	var param coordinatorType.GetTaskParameter
	if err := ctx.ShouldBind(&param); err != nil {
		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, fmt.Errorf("prover tasks parameter invalid, err:%w", err), nil)
		return
	}

	// Resolve the proof type (chosen at random when unspecified) and look
	// up the matching task assigner.
	proverTask, ok := ptc.proverTasks[ptc.proofType(&param)]
	if !ok {
		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, fmt.Errorf("parameter wrong proof type"), nil)
		return
	}

	result, err := proverTask.Assign(ctx, &param)
	switch {
	case err != nil:
		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorGetTaskFailure, fmt.Errorf("return prover task err:%w", err), nil)
	case result == nil:
		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorEmptyProofData, fmt.Errorf("get empty prover task"), nil)
	default:
		coordinatorType.RenderJSON(ctx, types.Success, nil, result)
	}
}
// proofType resolves the proof type requested by the prover. When the prover
// does not specify one (ProofTypeUndefined), a supported type is chosen
// uniformly at random so chunk and batch tasks are assigned evenly over time.
func (ptc *GetTaskController) proofType(para *coordinatorType.GetTaskParameter) message.ProofType {
	proofType := message.ProofType(para.TaskType)
	if proofType == message.ProofTypeUndefined {
		// Draw one of the supported types directly instead of shuffling a
		// slice just to read its first element.
		proofTypes := []message.ProofType{
			message.ProofTypeChunk,
			message.ProofTypeBatch,
		}
		proofType = proofTypes[rand.Intn(len(proofTypes))]
	}
	return proofType
}

View File

@@ -0,0 +1,23 @@
package api
import (
"github.com/gin-gonic/gin"
ctypes "scroll-tech/common/types"
"scroll-tech/coordinator/internal/types"
)
// HealthCheckController serves the coordinator health-check API.
type HealthCheckController struct {
}

// NewHealthCheckController returns a HealthCheckController instance.
func NewHealthCheckController() *HealthCheckController {
	return &HealthCheckController{}
}

// HealthCheck responds with a bare success payload, indicating the
// coordinator is up and able to serve requests.
func (a *HealthCheckController) HealthCheck(c *gin.Context) {
	types.RenderJSON(c, ctypes.Success, nil, nil)
}

View File

@@ -1,115 +0,0 @@
package api
import (
"context"
"errors"
"fmt"
"time"
"github.com/patrickmn/go-cache"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/proof"
)
// ProverController is the prover RPC api controller.
type ProverController struct {
	// tokenCache maps a prover public key to its issued login token;
	// entries expire after the configured TTL.
	tokenCache    *cache.Cache
	// proofReceiver validates and stores submitted zk proofs.
	proofReceiver *proof.ZKProofReceiver
	// taskWorker allocates task subscriptions for registered provers.
	taskWorker    *proof.TaskWorker
}

// NewProverController creates a prover controller. Tokens live for
// cfg.TokenTimeToLive seconds and expired cache entries are purged hourly.
func NewProverController(cfg *config.ProverManagerConfig, db *gorm.DB) *ProverController {
	return &ProverController{
		proofReceiver: proof.NewZKProofReceiver(cfg, db),
		taskWorker:    proof.NewTaskWorker(),
		tokenCache:    cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
	}
}
// RequestToken verifies the auth message signature and returns a login
// token for the prover, reusing any unexpired token already cached for the
// same public key.
func (r *ProverController) RequestToken(authMsg *message.AuthMsg) (string, error) {
	// Reject messages whose signature does not verify.
	if ok, err := authMsg.Verify(); !ok {
		if err != nil {
			log.Error("failed to verify auth message", "error", err)
		}
		return "", errors.New("signature verification failed")
	}
	pubkey, err := authMsg.PublicKey()
	if err != nil {
		return "", fmt.Errorf("RequestToken auth msg public key error:%w", err)
	}
	// Reuse a still-valid cached token so repeated requests within the TTL
	// receive the same token.
	if token, ok := r.tokenCache.Get(pubkey); ok {
		return token.(string), nil
	}
	token, err := message.GenerateToken()
	if err != nil {
		return "", errors.New("token generation failed")
	}
	// Cache under the default TTL configured in NewProverController.
	r.tokenCache.SetDefault(pubkey, token)
	return token, nil
}
// verifyToken checks that the token carried in authMsg matches the
// unexpired token cached for the prover's public key.
func (r *ProverController) verifyToken(authMsg *message.AuthMsg) (bool, error) {
	pubkey, err := authMsg.PublicKey()
	if err != nil {
		return false, fmt.Errorf("verify token auth msg public key error:%w", err)
	}
	// Get returns a miss for expired entries, so an expired token fails
	// here the same way as a wrong one.
	if token, ok := r.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
		return false, fmt.Errorf("failed to find corresponding token. prover name: %s prover pk: %s", authMsg.Identity.Name, pubkey)
	}
	return true, nil
}
// Register is the registration api for a prover: it verifies the signed
// auth message and its one-time token, then allocates a task subscription
// for the prover.
func (r *ProverController) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
	// Verify register message.
	if ok, err := authMsg.Verify(); !ok {
		if err != nil {
			log.Error("failed to verify auth message", "error", err)
		}
		return nil, errors.New("signature verification failed")
	}
	// Lock here to avoid malicious prover message replay before cleanup of token
	if ok, err := r.verifyToken(authMsg); !ok {
		return nil, err
	}
	pubkey, err := authMsg.PublicKey()
	if err != nil {
		return nil, fmt.Errorf("register auth msg public key error:%w", err)
	}
	// prover successfully registered, remove token associated with this prover
	// so the same token cannot be replayed for a second registration
	r.tokenCache.Delete(pubkey)
	rpcSub, err := r.taskWorker.AllocTaskWorker(ctx, authMsg)
	if err != nil {
		return rpcSub, err
	}
	return rpcSub, nil
}
// SubmitProof accepts a proof message from a prover: the message signature
// is verified first, then the proof is handed to the proof receiver for
// processing.
func (r *ProverController) SubmitProof(proof *message.ProofMsg) error {
	// Reject proofs whose signature does not verify.
	ok, err := proof.Verify()
	if !ok {
		if err != nil {
			log.Error("failed to verify proof message", "error", err)
		}
		return errors.New("auth signature verify fail")
	}
	return r.proofReceiver.HandleZkProof(context.Background(), proof)
}

View File

@@ -1,304 +0,0 @@
package api
import (
"context"
"crypto/ecdsa"
"database/sql"
"errors"
"fmt"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/proof"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// geneAuthMsg builds an auth message signed with a freshly generated ECDSA
// key and returns both, for use as a valid login fixture in tests.
func geneAuthMsg(t *testing.T) (*message.AuthMsg, *ecdsa.PrivateKey) {
	authMsg := &message.AuthMsg{
		Identity: &message.Identity{
			Name: "prover_test1",
		},
	}
	privKey, err := crypto.GenerateKey()
	assert.NoError(t, err)
	assert.NoError(t, authMsg.SignWithKey(privKey))
	return authMsg, privKey
}

// proverController is the shared controller under test, built once in init.
var proverController *ProverController

// init constructs the controller with a mock-mode verifier and a 120-second
// token TTL; no database is needed for these tests.
func init() {
	conf := &config.ProverManagerConfig{
		TokenTimeToLive: 120,
	}
	conf.Verifier = &config.VerifierConfig{MockMode: true}
	proverController = NewProverController(conf, nil)
}
// TestProver_RequestToken covers the token request flow: signature
// verification failure, cache hit, token generation failure, and success.
func TestProver_RequestToken(t *testing.T) {
	// An auth message that was never signed must fail verification.
	convey.Convey("auth msg verify failure", t, func() {
		tmpAuthMsg := &message.AuthMsg{
			Identity: &message.Identity{
				Name: "prover_test_request_token",
			},
		}
		token, err := proverController.RequestToken(tmpAuthMsg)
		assert.Error(t, err)
		assert.Empty(t, token)
	})
	// A token already cached for the prover's public key is returned as-is.
	convey.Convey("token has already been distributed", t, func() {
		tmpAuthMsg, _ := geneAuthMsg(t)
		key, err := tmpAuthMsg.PublicKey()
		assert.NoError(t, err)
		tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
		proverController.tokenCache.Set(key, tokenCacheStored, time.Hour)
		token, err := proverController.RequestToken(tmpAuthMsg)
		assert.NoError(t, err)
		assert.Equal(t, token, tokenCacheStored)
	})
	// A failure from message.GenerateToken is propagated to the caller.
	convey.Convey("token generation failure", t, func() {
		tmpAuthMsg, _ := geneAuthMsg(t)
		patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
			return "", errors.New("token generation failed")
		})
		defer patchGuard.Reset()
		token, err := proverController.RequestToken(tmpAuthMsg)
		assert.Error(t, err)
		assert.Empty(t, token)
	})
	// A freshly generated token is returned on the happy path.
	convey.Convey("token generation success", t, func() {
		tmpAuthMsg, _ := geneAuthMsg(t)
		tokenCacheStored := "c393987bb791dd285dd3d8ffbd770ed1"
		patchGuard := gomonkey.ApplyFunc(message.GenerateToken, func() (string, error) {
			return tokenCacheStored, nil
		})
		defer patchGuard.Reset()
		token, err := proverController.RequestToken(tmpAuthMsg)
		assert.NoError(t, err)
		assert.Equal(t, tokenCacheStored, token)
	})
}
// TestProver_Register covers the registration flow: signature failure,
// token verification failure, notifier failure, worker allocation failure,
// and success. Internal collaborators are stubbed with gomonkey patches.
func TestProver_Register(t *testing.T) {
	// An unsigned auth message must be rejected before any token check.
	convey.Convey("auth msg verify failure", t, func() {
		tmpAuthMsg := &message.AuthMsg{
			Identity: &message.Identity{
				Name: "prover_test_register",
			},
		}
		subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
		assert.Error(t, err)
		assert.Empty(t, subscription)
	})
	// A token verification error aborts registration.
	convey.Convey("verify token failure", t, func() {
		tmpAuthMsg, _ := geneAuthMsg(t)
		patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
			return false, errors.New("verify token failure")
		})
		defer patchGuard.Reset()
		subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
		assert.Error(t, err)
		assert.Empty(t, subscription)
	})
	// Without an rpc notifier in the context, registration cannot subscribe.
	convey.Convey("notifier failure", t, func() {
		tmpAuthMsg, _ := geneAuthMsg(t)
		patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
			return true, nil
		})
		defer patchGuard.Reset()
		patchGuard.ApplyFunc(rpc.NotifierFromContext, func(ctx context.Context) (*rpc.Notifier, bool) {
			return nil, false
		})
		subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
		assert.Error(t, err)
		assert.Equal(t, err, rpc.ErrNotificationsUnsupported)
		assert.Equal(t, *subscription, rpc.Subscription{})
	})
	// An error from the task worker allocation is propagated.
	convey.Convey("register failure", t, func() {
		tmpAuthMsg, _ := geneAuthMsg(t)
		patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
			return true, nil
		})
		defer patchGuard.Reset()
		var taskWorker *proof.TaskWorker
		patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
			return nil, errors.New("register error")
		})
		subscription, err := proverController.Register(context.Background(), tmpAuthMsg)
		assert.Error(t, err)
		assert.Empty(t, subscription)
	})
	// Happy path: valid token and successful worker allocation.
	convey.Convey("register success", t, func() {
		tmpAuthMsg, _ := geneAuthMsg(t)
		patchGuard := gomonkey.ApplyPrivateMethod(proverController, "verifyToken", func(tmpAuthMsg *message.AuthMsg) (bool, error) {
			return true, nil
		})
		defer patchGuard.Reset()
		var taskWorker *proof.TaskWorker
		patchGuard.ApplyPrivateMethod(taskWorker, "AllocTaskWorker", func(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
			return nil, nil
		})
		_, err := proverController.Register(context.Background(), tmpAuthMsg)
		assert.NoError(t, err)
	})
}
// TestProver_SubmitProof drives proverController.SubmitProof through its main
// paths, using gomonkey to stub the ORM, verifier and DB layers.
// NOTE(review): later ApplyMethodFunc calls on the same patchGuard replace
// earlier stubs for the same method, so the sub-cases below are order-dependent
// and must not be re-ordered.
func TestProver_SubmitProof(t *testing.T) {
	tmpAuthMsg, prvKey := geneAuthMsg(t)
	pubKey, err := tmpAuthMsg.PublicKey()
	assert.NoError(t, err)
	id := "provers_info_test"
	tmpProof := &message.ProofMsg{
		ProofDetail: &message.ProofDetail{
			Type:       message.ProofTypeChunk,
			ID:         id,
			Status:     message.StatusOk,
			ChunkProof: &message.ChunkProof{},
		},
	}
	// Sign the proof with the same key as the auth message: the public key
	// recovered from the proof must match the registered prover's key.
	assert.NoError(t, tmpProof.Sign(prvKey))
	proofPubKey, err := tmpProof.PublicKey()
	assert.NoError(t, err)
	assert.Equal(t, pubKey, proofPubKey)
	// Stub the prover-task table so registration reloads no assigned tasks.
	var proverTaskOrm *orm.ProverTask
	patchGuard := gomonkey.ApplyMethodFunc(proverTaskOrm, "GetProverTasks", func(ctx context.Context, fields map[string]interface{}, orderByList []string, offset, limit int) ([]orm.ProverTask, error) {
		return nil, nil
	})
	defer patchGuard.Reset()
	provermanager.InitProverManager(nil)
	taskChan, err := provermanager.Manager.Register(context.Background(), pubKey, tmpAuthMsg.Identity)
	assert.NotNil(t, taskChan)
	assert.NoError(t, err)
	convey.Convey("verify failure", t, func() {
		var s *message.ProofMsg
		patchGuard.ApplyMethodFunc(s, "Verify", func() (bool, error) {
			return false, errors.New("proof verify error")
		})
		err = proverController.SubmitProof(tmpProof)
		assert.Error(t, err)
	})
	// From here on, signature verification always succeeds.
	var s *message.ProofMsg
	patchGuard.ApplyMethodFunc(s, "Verify", func() (bool, error) {
		return true, nil
	})
	// DB update stubs: all chunk/batch status and proof writes succeed.
	var chunkOrm *orm.Chunk
	patchGuard.ApplyMethodFunc(chunkOrm, "UpdateProofByHash", func(context.Context, string, *message.BatchProof, uint64, ...*gorm.DB) error {
		return nil
	})
	patchGuard.ApplyMethodFunc(chunkOrm, "UpdateProvingStatus", func(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
		return nil
	})
	var batchOrm *orm.Batch
	patchGuard.ApplyMethodFunc(batchOrm, "UpdateProofByHash", func(ctx context.Context, hash string, proof *message.BatchProof, proofTimeSec uint64, dbTX ...*gorm.DB) error {
		return nil
	})
	patchGuard.ApplyMethodFunc(batchOrm, "UpdateProvingStatus", func(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
		return nil
	})
	convey.Convey("get none provers of prover task", t, func() {
		patchGuard.ApplyMethodFunc(proverTaskOrm, "GetProverTaskByTaskIDAndPubKey", func(ctx context.Context, hash, pubKey string) (*orm.ProverTask, error) {
			return nil, nil
		})
		// A proof signed by a key with no assigned prover task is rejected.
		tmpProof1 := &message.ProofMsg{
			ProofDetail: &message.ProofDetail{
				ID:         "10001",
				Status:     message.StatusOk,
				ChunkProof: &message.ChunkProof{},
			},
		}
		privKey, err := crypto.GenerateKey()
		assert.NoError(t, err)
		tmpProof1.Sign(privKey)
		_, err1 := tmpProof1.PublicKey()
		assert.NoError(t, err1)
		err2 := proverController.SubmitProof(tmpProof1)
		fmt.Println(err2)
		targetErr := fmt.Errorf("validator failure get none prover task for the proof")
		assert.Equal(t, err2.Error(), targetErr.Error())
	})
	// From here on, the submitting prover does own an assigned task.
	patchGuard.ApplyMethodFunc(proverTaskOrm, "GetProverTaskByTaskIDAndPubKey", func(ctx context.Context, hash, pubKey string) (*orm.ProverTask, error) {
		now := time.Now()
		s := &orm.ProverTask{
			TaskID:          id,
			ProverPublicKey: proofPubKey,
			TaskType:        int16(message.ProofTypeChunk),
			ProverName:      "provers_info_test",
			ProvingStatus:   int16(types.ProverAssigned),
			CreatedAt:       now,
		}
		return s, nil
	})
	patchGuard.ApplyMethodFunc(proverTaskOrm, "UpdateProverTaskProvingStatus", func(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
		return nil
	})
	patchGuard.ApplyPrivateMethod(proverController.proofReceiver, "proofFailure", func(hash string, pubKey string, proofMsgType message.ProofType) {
	})
	convey.Convey("proof msg status is not ok", t, func() {
		// A prover-side failure report is accepted (not an error) and routed
		// to the proofFailure handler stubbed above.
		tmpProof.Status = message.StatusProofError
		err1 := proverController.SubmitProof(tmpProof)
		assert.NoError(t, err1)
	})
	tmpProof.Status = message.StatusOk
	var db *gorm.DB
	patchGuard.ApplyMethodFunc(db, "Transaction", func(fc func(tx *gorm.DB) error, opts ...*sql.TxOptions) (err error) {
		return nil
	})
	var tmpVerifier *verifier.Verifier
	convey.Convey("verifier proof failure", t, func() {
		targetErr := errors.New("verify proof failure")
		patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyChunkProof", func(proof *message.ChunkProof) (bool, error) {
			return false, targetErr
		})
		// Verifier failures are handled internally (proofFailure path), so
		// SubmitProof itself still returns nil.
		err1 := proverController.SubmitProof(tmpProof)
		assert.Nil(t, err1)
	})
	patchGuard.ApplyMethodFunc(tmpVerifier, "VerifyChunkProof", func(proof *message.ChunkProof) (bool, error) {
		return true, nil
	})
	patchGuard.ApplyPrivateMethod(proverController.proofReceiver, "closeProofTask", func(hash string, pubKey string, proofMsg *message.ProofMsg, proversInfo *coordinatorType.ProversInfo) error {
		return nil
	})
	// Happy path: valid proof, verification succeeds, task is closed.
	err1 := proverController.SubmitProof(tmpProof)
	assert.Nil(t, err1)
}

View File

@@ -1,30 +0,0 @@
package api
import (
"context"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
)
// ProverAPI is the RPC surface provers use to request a login token, register
// for task delivery over a subscription, and submit finished proofs.
type ProverAPI interface {
	RequestToken(authMsg *message.AuthMsg) (string, error)
	Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error)
	SubmitProof(proof *message.ProofMsg) error
}
// RegisterAPIs builds the list of public RPC APIs exposed by the coordinator:
// a single "prover" namespace backed by the prover controller.
func RegisterAPIs(cfg *config.Config, db *gorm.DB) []rpc.API {
	proverAPI := rpc.API{
		Namespace: "prover",
		Service:   ProverAPI(NewProverController(cfg.ProverManagerConfig, db)),
		Public:    true,
	}
	return []rpc.API{proverAPI}
}

View File

@@ -0,0 +1,72 @@
package api
import (
"encoding/json"
"fmt"
"github.com/gin-gonic/gin"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/submitproof"
coodinatorType "scroll-tech/coordinator/internal/types"
)
// SubmitProofController the submit proof api controller
type SubmitProofController struct {
submitProofReceiverLogic *submitproof.ProofReceiverLogic
}
// NewSubmitProofController constructs a SubmitProofController wired to the
// proof-receiving logic backed by the given configuration and database handle.
func NewSubmitProofController(cfg *config.Config, db *gorm.DB) *SubmitProofController {
	receiver := submitproof.NewSubmitProofReceiverLogic(cfg.ProverManager, db)
	return &SubmitProofController{submitProofReceiverLogic: receiver}
}
// SubmitProof accepts a proof submitted by a prover, decodes the
// task-type-specific payload, and hands it to the proof receiver logic.
//
// Responses use the shared RenderJSON envelope:
//   - ErrCoordinatorParameterInvalidNo for malformed parameters, payloads,
//     or an unknown task type
//   - ErrCoordinatorHandleZkProofFailure when the receiver rejects the proof
//   - Success otherwise
func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
	var spp coodinatorType.SubmitProofParameter
	if err := ctx.ShouldBind(&spp); err != nil {
		nerr := fmt.Errorf("parameter invalid, err:%w", err)
		coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
		return
	}

	proofMsg := message.ProofMsg{
		ProofDetail: &message.ProofDetail{
			ID:     spp.TaskID,
			Type:   message.ProofType(spp.TaskType),
			Status: message.RespStatus(spp.Status),
		},
	}

	// Decode the proof payload according to the declared task type. An
	// unrecognized type is rejected up front instead of being forwarded to
	// the receiver with an empty proof body (the previous switch silently
	// fell through for unknown types).
	switch message.ProofType(spp.TaskType) {
	case message.ProofTypeChunk:
		var tmpChunkProof message.ChunkProof
		if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
			nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
			coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
			return
		}
		proofMsg.ChunkProof = &tmpChunkProof
	case message.ProofTypeBatch:
		var tmpBatchProof message.BatchProof
		if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
			nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
			coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
			return
		}
		proofMsg.BatchProof = &tmpBatchProof
	default:
		nerr := fmt.Errorf("parameter invalid, unknown task type:%d", spp.TaskType)
		coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
		return
	}

	if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg); err != nil {
		nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
		coodinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
		return
	}
	coodinatorType.RenderJSON(ctx, types.Success, nil, nil)
}

View File

@@ -12,7 +12,6 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/collector"
"scroll-tech/coordinator/internal/orm"
)
@@ -22,11 +21,8 @@ type Collector struct {
db *gorm.DB
ctx context.Context
stopRunChan chan struct{}
stopTimeoutChan chan struct{}
collectors map[message.ProofType]collector.Collector
proverTaskOrm *orm.ProverTask
chunkOrm *orm.Chunk
batchOrm *orm.Batch
@@ -38,18 +34,12 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config) *Collect
cfg: cfg,
db: db,
ctx: ctx,
stopRunChan: make(chan struct{}),
stopTimeoutChan: make(chan struct{}),
collectors: make(map[message.ProofType]collector.Collector),
proverTaskOrm: orm.NewProverTask(db),
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
}
c.collectors[message.ProofTypeBatch] = collector.NewBatchProofCollector(cfg, db)
c.collectors[message.ProofTypeChunk] = collector.NewChunkProofCollector(cfg, db)
go c.run()
go c.timeoutProofTask()
log.Info("Start coordinator successfully.")
@@ -59,40 +49,9 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config) *Collect
// Stop all the collector: signals both the run loop and the timeout loop to
// exit. Both channels are unbuffered, so each send blocks until the matching
// goroutine (started in NewCollector) receives it.
func (c *Collector) Stop() {
	c.stopRunChan <- struct{}{}
	c.stopTimeoutChan <- struct{}{}
}
// run loops forever, invoking every registered collector once per tick until
// the context is canceled or Stop signals the loop to exit. A panic inside a
// collector is recovered and logged so the goroutine does not kill the process.
func (c *Collector) run() {
	defer func() {
		if err := recover(); err != nil {
			nerr := fmt.Errorf("collector panic error:%v", err)
			log.Warn(nerr.Error())
		}
	}()
	ticker := time.NewTicker(time.Second * 2)
	// FIX: release the ticker's resources on exit; the original never stopped it.
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			for _, tmpCollector := range c.collectors {
				if err := tmpCollector.Collect(c.ctx); err != nil {
					log.Warn("collect data to prover failure", "collector name", tmpCollector.Name(), "error", err)
				}
			}
		case <-c.ctx.Done():
			if c.ctx.Err() != nil {
				log.Error("manager context canceled with error", "error", c.ctx.Err())
			}
			return
		case <-c.stopRunChan:
			log.Info("the coordinator run loop exit")
			return
		}
	}
}
// timeoutTask cron check the send task is timeout. if timeout reached, restore the
// chunk/batch task to unassigned. then the batch/chunk collector can retry it.
func (c *Collector) timeoutProofTask() {
@@ -114,7 +73,7 @@ func (c *Collector) timeoutProofTask() {
}
for _, assignedProverTask := range assignedProverTasks {
timeoutDuration := time.Duration(c.cfg.ProverManagerConfig.CollectionTime) * time.Minute
timeoutDuration := time.Duration(c.cfg.ProverManager.CollectionTimeSec) * time.Second
// here not update the block batch proving status failed, because the collector loop will check
// the attempt times. if reach the times, the collector will set the block batch proving status.
if time.Since(assignedProverTask.AssignedAt) >= timeoutDuration {

View File

@@ -0,0 +1,25 @@
package auth
import (
"github.com/gin-gonic/gin"
"gorm.io/gorm"
"scroll-tech/coordinator/internal/orm"
)
// LoginLogic the auth logic
type LoginLogic struct {
challengeOrm *orm.Challenge
}
// NewLoginLogic returns a LoginLogic that persists login challenges through
// the challenge ORM bound to db.
func NewLoginLogic(db *gorm.DB) *LoginLogic {
	logic := new(LoginLogic)
	logic.challengeOrm = orm.NewChallenge(db)
	return logic
}
// InsertChallengeString insert and check the challenge string is existed.
// NOTE(review): presumably the ORM insert fails on a duplicate challenge,
// which is what defeats a login replay — confirm against
// orm.Challenge.InsertChallenge (e.g. a unique constraint on the column).
func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
	return l.challengeOrm.InsertChallenge(ctx, challenge)
}

View File

@@ -1,139 +0,0 @@
package collector
import (
"context"
"encoding/json"
"fmt"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// BatchProofCollector is collector implement for batch proof
type BatchProofCollector struct {
BaseCollector
}
// NewBatchProofCollector new a batch collector
func NewBatchProofCollector(cfg *config.Config, db *gorm.DB) *BatchProofCollector {
bp := &BatchProofCollector{
BaseCollector: BaseCollector{
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
},
}
return bp
}
// Name return the batch proof collector name
func (bp *BatchProofCollector) Name() string {
return BatchCollectorName
}
// Collect load and send batch tasks
func (bp *BatchProofCollector) Collect(ctx context.Context) error {
batchTasks, err := bp.batchOrm.GetUnassignedBatches(ctx, 1)
if err != nil {
return fmt.Errorf("failed to get unassigned batch proving tasks, error:%w", err)
}
if len(batchTasks) == 0 {
return nil
}
if len(batchTasks) != 1 {
return fmt.Errorf("get unassigned batch proving task len not 1, batch tasks:%v", batchTasks)
}
batchTask := batchTasks[0]
log.Info("start batch proof generation session", "id", batchTask.Hash)
if provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch) == 0 {
return fmt.Errorf("no idle common prover when starting proof generation session, id:%s", batchTask.Hash)
}
if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
return fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
}
proverStatusList, err := bp.sendTask(ctx, batchTask.Hash)
if err != nil {
return fmt.Errorf("send batch task id:%s err:%w", batchTask.Hash, err)
}
transErr := bp.db.Transaction(func(tx *gorm.DB) error {
// Update session proving status as assigned.
if err = bp.batchOrm.UpdateProvingStatus(ctx, batchTask.Hash, types.ProvingTaskAssigned, tx); err != nil {
return fmt.Errorf("failed to update task status, id:%s, error:%w", batchTask.Hash, err)
}
for _, proverStatus := range proverStatusList {
proverTask := orm.ProverTask{
TaskID: batchTask.Hash,
ProverPublicKey: proverStatus.PublicKey,
TaskType: int16(message.ProofTypeBatch),
ProverName: proverStatus.Name,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/databased/db.go
AssignedAt: utils.NowUTC(),
}
// Store session info.
if err = bp.proverTaskOrm.SetProverTask(ctx, &proverTask, tx); err != nil {
return fmt.Errorf("db set session info fail, session id:%s, error:%w", proverTask.TaskID, err)
}
}
return nil
})
return transErr
}
func (bp *BatchProofCollector) sendTask(ctx context.Context, hash string) ([]*coordinatorType.ProverStatus, error) {
// get chunks from db
chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, hash)
if err != nil {
err = fmt.Errorf("failed to get chunk proofs for batch task id:%s err:%w ", hash, err)
return nil, err
}
taskDetail := &message.BatchTaskDetail{}
for _, chunk := range chunks {
chunkInfo := &message.ChunkInfo{
ChainID: bp.cfg.L2Config.ChainID,
PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
PostStateRoot: common.HexToHash(chunk.StateRoot),
WithdrawRoot: common.HexToHash(chunk.WithdrawRoot),
DataHash: common.HexToHash(chunk.Hash),
IsPadding: false,
}
taskDetail.ChunkInfos = append(taskDetail.ChunkInfos, chunkInfo)
chunkProof := &message.ChunkProof{}
if err := json.Unmarshal(chunk.Proof, chunkProof); err != nil {
return nil, fmt.Errorf("json Unmarshal ChunkProof error: %w, chunk hash: %v", err, chunk.Hash)
}
taskDetail.ChunkProofs = append(taskDetail.ChunkProofs, chunkProof)
}
taskMsg := &message.TaskMsg{
ID: hash,
Type: message.ProofTypeBatch,
ChunkTaskDetail: nil,
BatchTaskDetail: taskDetail,
}
return bp.BaseCollector.sendTask(taskMsg)
}

View File

@@ -1,122 +0,0 @@
package collector
import (
"context"
"fmt"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// ChunkProofCollector the chunk proof collector
type ChunkProofCollector struct {
BaseCollector
}
// NewChunkProofCollector new a chunk proof collector
func NewChunkProofCollector(cfg *config.Config, db *gorm.DB) *ChunkProofCollector {
cp := &ChunkProofCollector{
BaseCollector: BaseCollector{
db: db,
cfg: cfg,
chunkOrm: orm.NewChunk(db),
blockOrm: orm.NewL2Block(db),
proverTaskOrm: orm.NewProverTask(db),
},
}
return cp
}
// Name return a block batch collector name
func (cp *ChunkProofCollector) Name() string {
return ChunkCollectorName
}
// Collect the chunk proof which need to prove
func (cp *ChunkProofCollector) Collect(ctx context.Context) error {
// load and send chunk tasks
chunkTasks, err := cp.chunkOrm.GetUnassignedChunks(ctx, 1)
if err != nil {
return fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
}
if len(chunkTasks) == 0 {
return nil
}
if len(chunkTasks) != 1 {
return fmt.Errorf("get unassigned chunk proving task len not 1, chunk tasks:%v", chunkTasks)
}
chunkTask := chunkTasks[0]
log.Info("start chunk generation session", "id", chunkTask.Hash)
if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
return fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
}
if provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk) == 0 {
return fmt.Errorf("no idle chunk prover when starting proof generation session, id:%s", chunkTask.Hash)
}
proverStatusList, err := cp.sendTask(ctx, chunkTask.Hash)
if err != nil {
return fmt.Errorf("send task failure, id:%s error:%w", chunkTask.Hash, err)
}
transErr := cp.db.Transaction(func(tx *gorm.DB) error {
// Update session proving status as assigned.
if err = cp.chunkOrm.UpdateProvingStatus(ctx, chunkTask.Hash, types.ProvingTaskAssigned, tx); err != nil {
log.Error("failed to update task status", "id", chunkTask.Hash, "err", err)
return err
}
for _, proverStatus := range proverStatusList {
proverTask := orm.ProverTask{
TaskID: chunkTask.Hash,
ProverPublicKey: proverStatus.PublicKey,
TaskType: int16(message.ProofTypeChunk),
ProverName: proverStatus.Name,
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/databased/db.go
AssignedAt: utils.NowUTC(),
}
if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask, tx); err != nil {
return fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, proverStatus.PublicKey, err)
}
}
return nil
})
return transErr
}
func (cp *ChunkProofCollector) sendTask(ctx context.Context, hash string) ([]*coordinatorType.ProverStatus, error) {
// Get block hashes.
wrappedBlocks, wrappedErr := cp.blockOrm.GetL2BlocksByChunkHash(ctx, hash)
if wrappedErr != nil {
return nil, fmt.Errorf("failed to fetch wrapped blocks, batch hash:%s err:%w", hash, wrappedErr)
}
blockHashes := make([]common.Hash, len(wrappedBlocks))
for i, wrappedBlock := range wrappedBlocks {
blockHashes[i] = wrappedBlock.Header.Hash()
}
taskMsg := &message.TaskMsg{
ID: hash,
Type: message.ProofTypeChunk,
ChunkTaskDetail: &message.ChunkTaskDetail{BlockHashes: blockHashes},
BatchTaskDetail: nil,
}
return cp.BaseCollector.sendTask(taskMsg)
}

View File

@@ -1,79 +0,0 @@
package proof
import (
"context"
"fmt"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/metrics"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/logic/provermanager"
)
var coordinatorProversDisconnectsTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/provers/disconnects/total", metrics.ScrollRegistry)
// TaskWorker held the prover task connection
type TaskWorker struct{}
// NewTaskWorker create a task worker
func NewTaskWorker() *TaskWorker {
return &TaskWorker{}
}
// AllocTaskWorker allocates a per-prover worker goroutine bound to an RPC
// subscription and returns that subscription to the caller.
//
// It fails when the connection does not support notifications, when the
// public key cannot be recovered from the auth message, or when the prover
// manager refuses the registration.
func (t *TaskWorker) AllocTaskWorker(ctx context.Context, authMsg *message.AuthMsg) (*rpc.Subscription, error) {
	notifier, supported := rpc.NotifierFromContext(ctx)
	if !supported {
		return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
	}
	pubKey, err := authMsg.PublicKey()
	if err != nil {
		return &rpc.Subscription{}, fmt.Errorf("AllocTaskWorker auth msg public key error:%w", err)
	}
	identity := authMsg.Identity
	// create or get the prover message channel
	taskCh, err := provermanager.Manager.Register(ctx, pubKey, identity)
	if err != nil {
		return &rpc.Subscription{}, err
	}
	rpcSub := notifier.CreateSubscription()
	// The worker lives until the subscription errors out or the notifier
	// closes; it frees the prover registration on exit.
	go t.worker(rpcSub, notifier, pubKey, identity, taskCh)
	log.Info("prover register", "name", identity.Name, "pubKey", pubKey, "version", identity.Version)
	return rpcSub, nil
}
// worker pumps task messages to a single registered prover until its RPC
// subscription drops or the notifier closes, then unregisters the prover.
// TODO worker add metrics
func (t *TaskWorker) worker(rpcSub *rpc.Subscription, notifier *rpc.Notifier, pubKey string, identity *message.Identity, taskCh <-chan *message.TaskMsg) {
	defer func() {
		if err := recover(); err != nil {
			// FIX: geth's log.Error takes a message plus key/value pairs; the
			// previous call passed printf verbs with a single argument
			// ("task worker subId:%d panic for:%v", err) and logged garbage.
			log.Error("task worker panic", "subID", rpcSub.ID, "error", err)
		}
		provermanager.Manager.FreeProver(pubKey)
		log.Info("prover unregister", "name", identity.Name, "pubKey", pubKey)
	}()
	for {
		select {
		case task := <-taskCh:
			notifier.Notify(rpcSub.ID, task) //nolint
		case err := <-rpcSub.Err():
			coordinatorProversDisconnectsTotalCounter.Inc(1)
			log.Warn("client stopped the ws connection", "name", identity.Name, "pubkey", pubKey, "err", err)
			return
		case <-notifier.Closed():
			return
		}
	}
}

View File

@@ -1,60 +0,0 @@
package provermanager
import (
"time"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
)
type proverMetrics struct {
proverProofsVerifiedSuccessTimeTimer gethMetrics.Timer
proverProofsVerifiedFailedTimeTimer gethMetrics.Timer
proverProofsGeneratedFailedTimeTimer gethMetrics.Timer
proverProofsLastAssignedTimestampGauge gethMetrics.Gauge
proverProofsLastFinishedTimestampGauge gethMetrics.Gauge
}
func (r *proverManager) UpdateMetricProverProofsLastFinishedTimestampGauge(pk string) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsLastFinishedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (r *proverManager) UpdateMetricProverProofsLastAssignedTimestampGauge(pk string) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsLastAssignedTimestampGauge.Update(time.Now().Unix())
}
}
}
func (r *proverManager) UpdateMetricProverProofsVerifiedSuccessTimeTimer(pk string, d time.Duration) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsVerifiedSuccessTimeTimer.Update(d)
}
}
}
func (r *proverManager) UpdateMetricProverProofsVerifiedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsVerifiedFailedTimeTimer.Update(d)
}
}
}
func (r *proverManager) UpdateMetricProverProofsGeneratedFailedTimeTimer(pk string, d time.Duration) {
if node, ok := r.proverPool.Get(pk); ok {
rMs := node.(*proverNode).metrics
if rMs != nil {
rMs.proverProofsGeneratedFailedTimeTimer.Update(d)
}
}
}

View File

@@ -1,203 +0,0 @@
package provermanager
import (
"context"
"crypto/rand"
"errors"
"fmt"
"math/big"
"sync"
"time"
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/orm"
)
var (
once sync.Once
// Manager the global prover manager
Manager *proverManager
)
// ProverNode is the interface that controls the provers
type proverNode struct {
// Prover name
Name string
// Prover type
Type message.ProofType
// Prover public key
PublicKey string
// Prover version
Version string
// task channel
taskChan chan *message.TaskMsg
// session id list which delivered to prover.
TaskIDs cmap.ConcurrentMap
// Time of message creation
registerTime time.Time
metrics *proverMetrics
}
type proverManager struct {
proverPool cmap.ConcurrentMap
proverTaskOrm *orm.ProverTask
}
// InitProverManager initializes the package-global Manager exactly once;
// repeated calls are no-ops thanks to the sync.Once guard.
func InitProverManager(db *gorm.DB) {
	once.Do(func() {
		mgr := &proverManager{
			proverPool:    cmap.New(),
			proverTaskOrm: orm.NewProverTask(db),
		}
		Manager = mgr
	})
}
// Register registers the prover identified by proverPublicKey and returns the
// channel on which task messages for that prover will be delivered.
//
// On first registration the prover's still-assigned task ids are reloaded from
// the database so tasks are not double-assigned after a reconnect. Reconnects
// within one minute of the previous registration are rejected to throttle
// flapping provers.
func (r *proverManager) Register(ctx context.Context, proverPublicKey string, identity *message.Identity) (<-chan *message.TaskMsg, error) {
	node, ok := r.proverPool.Get(proverPublicKey)
	if !ok {
		taskIDs, err := r.reloadProverAssignedTasks(ctx, proverPublicKey)
		if err != nil {
			return nil, fmt.Errorf("register error:%w", err)
		}
		rMs := &proverMetrics{
			proverProofsVerifiedSuccessTimeTimer:   gethMetrics.GetOrRegisterTimer(fmt.Sprintf("prover/proofs/verified/success/time/%s", proverPublicKey), metrics.ScrollRegistry),
			proverProofsVerifiedFailedTimeTimer:    gethMetrics.GetOrRegisterTimer(fmt.Sprintf("prover/proofs/verified/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
			proverProofsGeneratedFailedTimeTimer:   gethMetrics.GetOrRegisterTimer(fmt.Sprintf("prover/proofs/generated/failed/time/%s", proverPublicKey), metrics.ScrollRegistry),
			proverProofsLastAssignedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("prover/proofs/last/assigned/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
			proverProofsLastFinishedTimestampGauge: gethMetrics.GetOrRegisterGauge(fmt.Sprintf("prover/proofs/last/finished/timestamp/%s", proverPublicKey), metrics.ScrollRegistry),
		}
		node = &proverNode{
			Name:      identity.Name,
			Type:      identity.ProverType,
			Version:   identity.Version,
			PublicKey: proverPublicKey,
			TaskIDs:   *taskIDs,
			taskChan:  make(chan *message.TaskMsg, 4),
			metrics:   rMs,
		}
		r.proverPool.Set(proverPublicKey, node)
	}
	prover := node.(*proverNode)
	// avoid reconnection too frequently.
	// FIX: the comparison must be against a time.Duration; the previous bare
	// literal 60 meant 60 *nanoseconds*, so the throttle effectively never
	// triggered.
	if time.Since(prover.registerTime) < 60*time.Second {
		log.Warn("prover reconnect too frequently", "prover_name", identity.Name, "prover_type", identity.ProverType, "public key", proverPublicKey)
		return nil, fmt.Errorf("prover reconnect too frequently")
	}
	// update register time and status
	prover.registerTime = time.Now()
	return prover.taskChan, nil
}
// reloadProverAssignedTasks pages through every prover task still in the
// ProverAssigned state and returns the set of task ids that belong to
// proverPublicKey, so a reconnecting prover keeps its in-flight assignments.
func (r *proverManager) reloadProverAssignedTasks(ctx context.Context, proverPublicKey string) (*cmap.ConcurrentMap, error) {
	var assignedProverTasks []orm.ProverTask
	page := 0
	limit := 100
	for {
		page++
		whereFields := make(map[string]interface{})
		whereFields["proving_status"] = int16(types.ProverAssigned)
		orderBy := []string{"id asc"}
		offset := (page - 1) * limit
		batchAssignedProverTasks, err := r.proverTaskOrm.GetProverTasks(ctx, whereFields, orderBy, offset, limit)
		if err != nil {
			log.Warn("reloadProverAssignedTasks get all assigned failure", "error", err)
			return nil, fmt.Errorf("reloadProverAssignedTasks error:%w", err)
		}
		// FIX: accumulate the page BEFORE testing for the end of the data.
		// The original broke out of the loop first, silently dropping the
		// final partial page — and dropping everything when all rows fit in
		// a single page.
		assignedProverTasks = append(assignedProverTasks, batchAssignedProverTasks...)
		if len(batchAssignedProverTasks) < limit {
			break
		}
	}
	taskIDs := cmap.New()
	for _, assignedProverTask := range assignedProverTasks {
		if assignedProverTask.ProverPublicKey == proverPublicKey && assignedProverTask.ProvingStatus == int16(types.ProverAssigned) {
			taskIDs.Set(assignedProverTask.TaskID, struct{}{})
		}
	}
	return &taskIDs, nil
}
// SendTask send the need proved message to prover. It returns the public key
// and name of the prover the task was dispatched to.
func (r *proverManager) SendTask(proverType message.ProofType, msg *message.TaskMsg) (string, string, error) {
	tmpProver := r.selectProver(proverType)
	if tmpProver == nil {
		return "", "", errors.New("selectProver returns nil")
	}
	// Non-blocking send: a full channel means the prover is saturated and the
	// task is reported as undeliverable rather than queued indefinitely.
	select {
	case tmpProver.taskChan <- msg:
		// Record the task id so later proof submissions can be matched back
		// to this prover.
		tmpProver.TaskIDs.Set(msg.ID, struct{}{})
	default:
		err := fmt.Errorf("prover channel is full, proverName:%s, publicKey:%s", tmpProver.Name, tmpProver.PublicKey)
		return "", "", err
	}
	r.UpdateMetricProverProofsLastAssignedTimestampGauge(tmpProver.PublicKey)
	return tmpProver.PublicKey, tmpProver.Name, nil
}
// ExistTaskIDForProver reports whether the prover registered under pk
// currently holds the task with the given id.
func (r *proverManager) ExistTaskIDForProver(pk string, id string) bool {
	node, ok := r.proverPool.Get(pk)
	if !ok {
		return false
	}
	return node.(*proverNode).TaskIDs.Has(id)
}
// FreeProver free the prover with the pk key: removes the prover node (and
// with it the task channel) from the pool, e.g. when its connection goes away.
func (r *proverManager) FreeProver(pk string) {
	r.proverPool.Pop(pk)
}
// FreeTaskIDForProver removes the task id from the prover's in-flight set,
// making the prover eligible for new work of its type again.
func (r *proverManager) FreeTaskIDForProver(pk string, id string) {
	node, ok := r.proverPool.Get(pk)
	if !ok {
		return
	}
	node.(*proverNode).TaskIDs.Pop(id)
}
// GetNumberOfIdleProvers counts registered provers of the given type that have
// no task currently assigned.
func (r *proverManager) GetNumberOfIdleProvers(proverType message.ProofType) int {
	count := 0
	for item := range r.proverPool.IterBuffered() {
		node := item.Val.(*proverNode)
		if node.Type == proverType && node.TaskIDs.Count() == 0 {
			count++
		}
	}
	return count
}
// selectProver picks a uniformly random idle prover of the requested type, or
// nil when none is available.
func (r *proverManager) selectProver(proverType message.ProofType) *proverNode {
	pubkeys := r.proverPool.Keys()
	for len(pubkeys) > 0 {
		// Draw a random candidate index from the remaining keys
		// (crypto/rand; the error is ignored for an in-range positive bound).
		idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(pubkeys))))
		if val, ok := r.proverPool.Get(pubkeys[idx.Int64()]); ok {
			rn := val.(*proverNode)
			// Eligible only when idle (no in-flight task ids) and of the
			// requested proof type.
			if rn.TaskIDs.Count() == 0 && rn.Type == proverType {
				return rn
			}
		}
		// Discard the inspected candidate: overwrite its slot with the head
		// element, then shrink the slice from the front.
		pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
	}
	return nil
}

View File

@@ -0,0 +1,151 @@
package provertask
import (
"context"
"encoding/json"
"fmt"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// BatchProverTask is prover task implement for batch proof
type BatchProverTask struct {
BaseProverTask
}
// NewBatchProverTask builds a BatchProverTask whose ORM accessors are bound to db.
func NewBatchProverTask(cfg *config.Config, db *gorm.DB) *BatchProverTask {
	base := BaseProverTask{
		db:            db,
		cfg:           cfg,
		chunkOrm:      orm.NewChunk(db),
		batchOrm:      orm.NewBatch(db),
		proverTaskOrm: orm.NewProverTask(db),
	}
	return &BatchProverTask{BaseProverTask: base}
}
// Assign load and assign batch tasks: claims one unassigned batch, records a
// prover-task row for the calling prover (identified by the public key / name
// previously placed in the gin context — presumably by the auth middleware),
// and returns the task payload. A (nil, nil) return means no work is available.
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
	publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
	if !publicKeyExist {
		return nil, fmt.Errorf("get public key from contex failed")
	}
	proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
	if !proverNameExist {
		return nil, fmt.Errorf("get prover name from contex failed")
	}
	// Claim the batch and flip its status in a single statement (an
	// update-returning query, going by the ORM method name — confirm), so
	// concurrent requests cannot be handed the same batch.
	batchTasks, err := bp.batchOrm.UpdateUnassignedBatchReturning(ctx, 1)
	if err != nil {
		return nil, fmt.Errorf("failed to get unassigned batch proving tasks, error:%w", err)
	}
	if len(batchTasks) == 0 {
		return nil, nil
	}
	if len(batchTasks) != 1 {
		return nil, fmt.Errorf("get unassigned batch proving task len not 1, batch tasks:%v", batchTasks)
	}
	batchTask := batchTasks[0]
	log.Info("start batch proof generation session", "id", batchTask.Hash)
	// NOTE(review): this early return leaves the batch in its claimed state —
	// unlike the two failure paths below, it does not call
	// recoverProvingStatus. Verify that is intentional for exhausted attempts.
	if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
		return nil, fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
	}
	proverTask := orm.ProverTask{
		TaskID:          batchTask.Hash,
		ProverPublicKey: publicKey.(string),
		TaskType:        int16(message.ProofTypeBatch),
		ProverName:      proverName.(string),
		ProvingStatus:   int16(types.ProverAssigned),
		FailureType:     int16(types.ProverTaskFailureTypeUndefined),
		// here why need use UTC time. see scroll/common/databased/db.go
		AssignedAt: utils.NowUTC(),
	}
	// Store session info; on failure the claimed batch is released again.
	if err = bp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
		bp.recoverProvingStatus(ctx, batchTask)
		return nil, fmt.Errorf("db set session info fail, session id:%s, error:%w", proverTask.TaskID, err)
	}
	taskMsg, err := bp.formatProverTask(ctx, batchTask.Hash)
	if err != nil {
		bp.recoverProvingStatus(ctx, batchTask)
		return nil, fmt.Errorf("format prover failure, id:%s error:%w", batchTask.Hash, err)
	}
	return taskMsg, nil
}
// formatProverTask assembles the batch task payload for taskID: it loads every
// chunk belonging to the batch, decodes each chunk's stored proof, and packs
// the proofs together with per-chunk metadata into a JSON-encoded
// BatchTaskDetail carried by the returned GetTaskSchema.
func (bp *BatchProverTask) formatProverTask(ctx context.Context, taskID string) (*coordinatorType.GetTaskSchema, error) {
	// get chunk from db
	chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, taskID)
	if err != nil {
		err = fmt.Errorf("failed to get chunk proofs for batch task id:%s err:%w ", taskID, err)
		return nil, err
	}
	var chunkProofs []*message.ChunkProof
	var chunkInfos []*message.ChunkInfo
	for _, chunk := range chunks {
		// Each chunk row stores its proof as raw JSON; decode it here.
		var proof message.ChunkProof
		if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
			return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, taskID, chunk.Hash)
		}
		chunkProofs = append(chunkProofs, &proof)
		chunkInfo := message.ChunkInfo{
			ChainID:       bp.cfg.L2.ChainID,
			PrevStateRoot: common.HexToHash(chunk.ParentChunkStateRoot),
			PostStateRoot: common.HexToHash(chunk.StateRoot),
			WithdrawRoot:  common.HexToHash(chunk.WithdrawRoot),
			DataHash:      common.HexToHash(chunk.Hash),
			IsPadding:     false,
		}
		chunkInfos = append(chunkInfos, &chunkInfo)
	}
	taskDetail := message.BatchTaskDetail{
		ChunkInfos:  chunkInfos,
		ChunkProofs: chunkProofs,
	}
	chunkProofsBytes, err := json.Marshal(taskDetail)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", taskID, err)
	}
	taskMsg := &coordinatorType.GetTaskSchema{
		TaskID:   taskID,
		TaskType: int(message.ProofTypeBatch),
		TaskData: string(chunkProofsBytes),
	}
	return taskMsg, nil
}
// recoverProvingStatus releases a batch task back to ProvingTaskUnassigned so
// it can be handed to another prover when the current assignment could not be
// completed. A recovery failure is only logged; callers already return an error.
func (bp *BatchProverTask) recoverProvingStatus(ctx *gin.Context, batchTask *orm.Batch) {
	if err := bp.batchOrm.UpdateProvingStatus(ctx, batchTask.Hash, types.ProvingTaskUnassigned); err != nil {
		// log key was "hash:" — stray colon broke the key/value pairing style
		log.Warn("failed to recover batch proving status", "hash", batchTask.Hash, "error", err)
	}
}

View File

@@ -0,0 +1,134 @@
package provertask
import (
"context"
"encoding/json"
"fmt"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// ChunkProverTask assigns chunk proving tasks to provers. It embeds
// BaseProverTask for the shared DB handles and the attempt-limit check.
type ChunkProverTask struct {
	BaseProverTask
}
// NewChunkProverTask constructs a ChunkProverTask wired to the given
// configuration and database handle.
func NewChunkProverTask(cfg *config.Config, db *gorm.DB) *ChunkProverTask {
	return &ChunkProverTask{
		BaseProverTask: BaseProverTask{
			db:            db,
			cfg:           cfg,
			chunkOrm:      orm.NewChunk(db),
			blockOrm:      orm.NewL2Block(db),
			proverTaskOrm: orm.NewProverTask(db),
		},
	}
}
// Assign picks at most one unassigned chunk proving task whose end block is
// within the prover's reported height, records it as assigned to the prover
// identified via the gin context, and returns the serialized task payload.
// If bookkeeping fails after assignment, the chunk is reverted to unassigned.
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
	publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
	if !publicKeyExist {
		return nil, fmt.Errorf("get public key from context failed")
	}
	proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
	if !proverNameExist {
		return nil, fmt.Errorf("get prover name from context failed")
	}
	// Atomically mark at most one eligible chunk (end_block_number <= prover
	// height) as assigned and fetch it back.
	chunkTasks, err := cp.chunkOrm.UpdateUnassignedChunkReturning(ctx, getTaskParameter.ProverHeight, 1)
	if err != nil {
		return nil, fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
	}
	if len(chunkTasks) == 0 {
		// No chunk is ready to prove; not an error.
		return nil, nil
	}
	if len(chunkTasks) != 1 {
		return nil, fmt.Errorf("get unassigned chunk proving task len not 1, chunk tasks:%v", chunkTasks)
	}
	chunkTask := chunkTasks[0]
	log.Info("start chunk generation session", "id", chunkTask.Hash)
	// checkAttemptsExceeded returns false once this task has already been
	// attempted the configured maximum number of times.
	if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
		return nil, fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
	}
	proverTask := orm.ProverTask{
		TaskID:          chunkTask.Hash,
		ProverPublicKey: publicKey.(string),
		TaskType:        int16(message.ProofTypeChunk),
		ProverName:      proverName.(string),
		ProvingStatus:   int16(types.ProverAssigned),
		FailureType:     int16(types.ProverTaskFailureTypeUndefined),
		// here why need use UTC time. see scroll/common/databased/db.go
		AssignedAt: utils.NowUTC(),
	}
	// Store session info; on failure release the chunk back to unassigned.
	if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
		cp.recoverProvingStatus(ctx, chunkTask)
		return nil, fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, publicKey, err)
	}
	taskMsg, err := cp.formatProverTask(ctx, chunkTask.Hash)
	if err != nil {
		cp.recoverProvingStatus(ctx, chunkTask)
		return nil, fmt.Errorf("format prover task failure, id:%s error:%w", chunkTask.Hash, err)
	}
	return taskMsg, nil
}
// formatProverTask builds the payload for a chunk proving task: the hashes of
// every L2 block in the chunk, JSON-encoded into a GetTaskSchema.
func (cp *ChunkProverTask) formatProverTask(ctx context.Context, hash string) (*coordinatorType.GetTaskSchema, error) {
	// Get block hashes.
	wrappedBlocks, wrappedErr := cp.blockOrm.GetL2BlocksByChunkHash(ctx, hash)
	if wrappedErr != nil {
		// This is a chunk hash, not a batch hash (old message was wrong); also
		// split out the empty case so %w never wraps a nil error.
		return nil, fmt.Errorf("failed to fetch wrapped blocks, chunk hash:%s err:%w", hash, wrappedErr)
	}
	if len(wrappedBlocks) == 0 {
		return nil, fmt.Errorf("no wrapped blocks found, chunk hash:%s", hash)
	}
	blockHashes := make([]common.Hash, len(wrappedBlocks))
	for i, wrappedBlock := range wrappedBlocks {
		blockHashes[i] = wrappedBlock.Header.Hash()
	}
	taskDetail := message.ChunkTaskDetail{
		BlockHashes: blockHashes,
	}
	blockHashesBytes, err := json.Marshal(taskDetail)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal block hashes hash:%s, err:%w", hash, err)
	}
	proverTaskSchema := &coordinatorType.GetTaskSchema{
		TaskID:   hash,
		TaskType: int(message.ProofTypeChunk),
		TaskData: string(blockHashesBytes),
	}
	return proverTaskSchema, nil
}
// recoverProvingStatus releases a chunk task back to ProvingTaskUnassigned so
// it can be handed to another prover when the current assignment could not be
// completed. A recovery failure is only logged; callers already return an error.
func (cp *ChunkProverTask) recoverProvingStatus(ctx *gin.Context, chunkTask *orm.Chunk) {
	if err := cp.chunkOrm.UpdateProvingStatus(ctx, chunkTask.Hash, types.ProvingTaskUnassigned); err != nil {
		// log key was "hash:" — stray colon broke the key/value pairing style
		log.Warn("failed to recover chunk proving status", "hash", chunkTask.Hash, "error", err)
	}
}

View File

@@ -1,8 +1,9 @@
package collector
package provertask
import (
"context"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
@@ -12,28 +13,19 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
const (
// BatchCollectorName the name of batch collector
BatchCollectorName = "batch_collector"
// ChunkCollectorName the name of chunk collector
ChunkCollectorName = "chunk_collector"
)
var coordinatorSessionsTimeoutTotalCounter = gethMetrics.NewRegisteredCounter("coordinator/sessions/timeout/total", metrics.ScrollRegistry)
// Collector the interface of a collector who send data to prover
type Collector interface {
Name() string
Collect(ctx context.Context) error
// ProverTask the interface of a collector who send data to prover
type ProverTask interface {
Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
}
// BaseCollector a base collector which contain series functions
type BaseCollector struct {
// BaseProverTask a base prover task which contain series functions
type BaseProverTask struct {
cfg *config.Config
ctx context.Context
db *gorm.DB
@@ -45,7 +37,7 @@ type BaseCollector struct {
}
// checkAttempts use the count of prover task info to check the attempts
func (b *BaseCollector) checkAttemptsExceeded(hash string, taskType message.ProofType) bool {
func (b *BaseProverTask) checkAttemptsExceeded(hash string, taskType message.ProofType) bool {
whereFields := make(map[string]interface{})
whereFields["task_id"] = hash
whereFields["task_type"] = int16(taskType)
@@ -55,17 +47,11 @@ func (b *BaseCollector) checkAttemptsExceeded(hash string, taskType message.Proo
return true
}
if len(proverTasks) >= int(b.cfg.ProverManagerConfig.SessionAttempts) {
if len(proverTasks) >= int(b.cfg.ProverManager.SessionAttempts) {
coordinatorSessionsTimeoutTotalCounter.Inc(1)
log.Warn("proof generation prover task %s ended because reach the max attempts", hash)
for _, proverTask := range proverTasks {
if types.ProvingStatus(proverTask.ProvingStatus) == types.ProvingTaskFailed {
provermanager.Manager.FreeTaskIDForProver(proverTask.ProverPublicKey, hash)
}
}
transErr := b.db.Transaction(func(tx *gorm.DB) error {
switch message.ProofType(proverTasks[0].TaskType) {
case message.ProofTypeChunk:
@@ -89,29 +75,3 @@ func (b *BaseCollector) checkAttemptsExceeded(hash string, taskType message.Proo
}
return true
}
func (b *BaseCollector) sendTask(taskMsg *message.TaskMsg) ([]*coordinatorType.ProverStatus, error) {
var err error
var proverStatusList []*coordinatorType.ProverStatus
for i := uint8(0); i < b.cfg.ProverManagerConfig.ProversPerSession; i++ {
proverPubKey, proverName, sendErr := provermanager.Manager.SendTask(taskMsg.Type, taskMsg)
if sendErr != nil {
err = sendErr
continue
}
provermanager.Manager.UpdateMetricProverProofsLastAssignedTimestampGauge(proverPubKey)
proverStatus := &coordinatorType.ProverStatus{
PublicKey: proverPubKey,
Name: proverName,
Status: types.ProverAssigned,
}
proverStatusList = append(proverStatusList, proverStatus)
}
if err != nil {
return nil, err
}
return proverStatusList, nil
}

View File

@@ -1,4 +1,4 @@
package proof
package submitproof
import (
"context"
@@ -6,6 +6,7 @@ import (
"fmt"
"time"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
@@ -15,9 +16,9 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
)
var (
@@ -37,25 +38,25 @@ var (
ErrValidatorFailureProverInfoHasProofValid = errors.New("validator failure prover task info has proof valid")
)
// ZKProofReceiver the proof receiver
type ZKProofReceiver struct {
// ProofReceiverLogic the proof receiver logic
type ProofReceiverLogic struct {
chunkOrm *orm.Chunk
batchOrm *orm.Batch
proverTaskOrm *orm.ProverTask
db *gorm.DB
cfg *config.ProverManagerConfig
cfg *config.ProverManager
verifier *verifier.Verifier
}
// NewZKProofReceiver create a proof receiver
func NewZKProofReceiver(cfg *config.ProverManagerConfig, db *gorm.DB) *ZKProofReceiver {
// NewSubmitProofReceiverLogic create a proof receiver logic
func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB) *ProofReceiverLogic {
vf, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
panic("proof receiver new verifier failure")
}
return &ZKProofReceiver{
return &ProofReceiverLogic{
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
proverTaskOrm: orm.NewProverTask(db),
@@ -70,9 +71,11 @@ func NewZKProofReceiver(cfg *config.ProverManagerConfig, db *gorm.DB) *ZKProofRe
// HandleZkProof handle a ZkProof submitted from a prover.
// For now only proving/verifying error will lead to setting status as skipped.
// db/unmarshal errors will not because they are errors on the business logic side.
func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.ProofMsg) error {
pk, _ := proofMsg.PublicKey()
provermanager.Manager.UpdateMetricProverProofsLastFinishedTimestampGauge(pk)
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg) error {
pk := ctx.GetString(coordinatorType.PublicKey)
if len(pk) == 0 {
return fmt.Errorf("get public key from contex failed")
}
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, proofMsg.ID, pk)
if proverTask == nil || err != nil {
@@ -141,8 +144,6 @@ func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.P
// TODO: Prover needs to be slashed if proof is invalid.
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
provermanager.Manager.UpdateMetricProverProofsVerifiedFailedTimeTimer(pk, proofTime)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
return nil
@@ -153,12 +154,11 @@ func (m *ZKProofReceiver) HandleZkProof(ctx context.Context, proofMsg *message.P
}
coordinatorProofsVerifiedSuccessTimeTimer.Update(proofTime)
provermanager.Manager.UpdateMetricProverProofsVerifiedSuccessTimeTimer(pk, proofTime)
return nil
}
func (m *ZKProofReceiver) checkAreAllChunkProofsReady(ctx context.Context, chunkHash string) error {
func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, chunkHash string) error {
batchHash, err := m.chunkOrm.GetChunkBatchHash(ctx, chunkHash)
if err != nil {
return err
@@ -177,7 +177,7 @@ func (m *ZKProofReceiver) checkAreAllChunkProofsReady(ctx context.Context, chunk
return nil
}
func (m *ZKProofReceiver) validator(proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
func (m *ProofReceiverLogic) validator(proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
// Ensure this prover is eligible to participate in the prover task.
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
@@ -198,8 +198,6 @@ func (m *ZKProofReceiver) validator(proverTask *orm.ProverTask, pk string, proof
if proofMsg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
provermanager.Manager.UpdateMetricProverProofsGeneratedFailedTimeTimer(pk, proofTime)
log.Info("proof generated by prover failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", proofMsg.Error)
return ErrValidatorFailureProofMsgStatusNotOk
@@ -207,31 +205,29 @@ func (m *ZKProofReceiver) validator(proverTask *orm.ProverTask, pk string, proof
return nil
}
func (m *ZKProofReceiver) proofFailure(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskFailed); err != nil {
log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
func (m *ZKProofReceiver) proofRecover(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
}
}
func (m *ZKProofReceiver) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) error {
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) error {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg.Type, types.ProvingTaskVerified); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "pubKey", pubKey, "error", err)
return err
}
provermanager.Manager.FreeTaskIDForProver(pubKey, hash)
return nil
}
// UpdateProofStatus update the chunk/batch task and session info status
func (m *ZKProofReceiver) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsgType message.ProofType, status types.ProvingStatus) error {
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsgType message.ProofType, status types.ProvingStatus) error {
// if the prover task failure type is SessionInfoFailureTimeout,
// just skip update the status because the proof result come too late.
if m.checkIsTimeoutFailure(ctx, hash, proverPublicKey) {
@@ -285,7 +281,7 @@ func (m *ZKProofReceiver) updateProofStatus(ctx context.Context, hash string, pr
return nil
}
func (m *ZKProofReceiver) checkIsTaskSuccess(ctx context.Context, hash string, proofType message.ProofType) bool {
func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string, proofType message.ProofType) bool {
var provingStatus types.ProvingStatus
var err error
@@ -305,7 +301,7 @@ func (m *ZKProofReceiver) checkIsTaskSuccess(ctx context.Context, hash string, p
return provingStatus == types.ProvingTaskVerified
}
func (m *ZKProofReceiver) checkIsTimeoutFailure(ctx context.Context, hash, proverPublicKey string) bool {
func (m *ProofReceiverLogic) checkIsTimeoutFailure(ctx context.Context, hash, proverPublicKey string) bool {
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, hash, proverPublicKey)
if err != nil {
return false

View File

@@ -0,0 +1,50 @@
package middleware
import (
"crypto/rand"
"encoding/base64"
"time"
"github.com/gin-gonic/gin"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/internal/config"
)
// ChallengeMiddleware builds the jwt middleware that backs GET /challenge.
// Its LoginHandler issues a short-lived challenge token without requiring any
// credentials (Authenticator accepts everyone); the token's claims embed 32
// fresh random bytes so every issued challenge is unique, which lets the
// login flow reject replayed challenges.
func ChallengeMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
	jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
		// No credentials are checked when issuing a challenge.
		Authenticator: func(c *gin.Context) (interface{}, error) {
			return nil, nil
		},
		// Embed a random nonce in the token claims so each challenge differs.
		PayloadFunc: func(data interface{}) jwt.MapClaims {
			b := make([]byte, 32)
			_, err := rand.Read(b)
			if err != nil {
				// Fall back to empty claims if the OS RNG fails.
				return jwt.MapClaims{}
			}
			return jwt.MapClaims{
				"random": base64.URLEncoding.EncodeToString(b),
			}
		},
		Unauthorized: unauthorized,
		Key:          []byte(conf.Auth.Secret),
		// Challenge tokens expire after ChallengeExpireDurationSec seconds.
		Timeout: time.Second * time.Duration(conf.Auth.ChallengeExpireDurationSec),
		// Accept the token from the Authorization header, ?token= query or jwt cookie.
		TokenLookup:   "header: Authorization, query: token, cookie: jwt",
		TokenHeadName: "Bearer",
		TimeFunc:      time.Now,
		LoginResponse: loginResponse,
	})
	// Both failures are unrecoverable configuration errors; log.Crit terminates.
	if err != nil {
		log.Crit("new jwt middleware panic", "error", err)
	}
	if errInit := jwtMiddleware.MiddlewareInit(); errInit != nil {
		log.Crit("init jwt middleware panic", "error", errInit)
	}
	return jwtMiddleware
}

View File

@@ -0,0 +1,34 @@
package middleware
import (
"errors"
"strings"
"time"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"scroll-tech/common/types"
coordinatorType "scroll-tech/coordinator/internal/types"
)
// unauthorized maps jwt middleware failures onto coordinator error codes and
// renders them as a JSON error response.
func unauthorized(c *gin.Context, _ int, message string) {
	normalized := strings.ToLower(message)
	errCode := types.ErrJWTCommonErr
	if normalized == jwt.ErrExpiredToken.Error() {
		errCode = types.ErrJWTTokenExpired
	}
	coordinatorType.RenderJSON(c, errCode, errors.New(normalized), nil)
}
// loginResponse renders a successful challenge/login response: the signed JWT
// token together with its expiry time. The jwt middleware passes the token in
// the message parameter. The expiry parameter is renamed from "time", which
// shadowed the time package inside the function body.
func loginResponse(c *gin.Context, code int, message string, expire time.Time) {
	resp := coordinatorType.LoginSchema{
		Time:  expire,
		Token: message,
	}
	coordinatorType.RenderJSON(c, types.Success, nil, resp)
}

View File

@@ -0,0 +1,39 @@
package middleware
import (
"time"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/types"
)
// LoginMiddleware builds the jwt auth middleware used for POST /login and for
// every authenticated API route. api.Auth.Login authenticates the prover;
// api.Auth.PayloadFunc / IdentityHandler store and restore the prover identity
// in the gin context under types.PublicKey.
func LoginMiddleware(conf *config.Config) *jwt.GinJWTMiddleware {
	jwtMiddleware, err := jwt.New(&jwt.GinJWTMiddleware{
		PayloadFunc:     api.Auth.PayloadFunc,
		IdentityHandler: api.Auth.IdentityHandler,
		IdentityKey:     types.PublicKey,
		Key:             []byte(conf.Auth.Secret),
		// Login tokens expire after LoginExpireDurationSec seconds.
		Timeout:       time.Second * time.Duration(conf.Auth.LoginExpireDurationSec),
		Authenticator: api.Auth.Login,
		Unauthorized:  unauthorized,
		// Accept the token from the Authorization header, ?token= query or jwt cookie.
		TokenLookup:   "header: Authorization, query: token, cookie: jwt",
		TokenHeadName: "Bearer",
		TimeFunc:      time.Now,
		LoginResponse: loginResponse,
	})
	// Both failures are unrecoverable configuration errors; log.Crit terminates.
	if err != nil {
		log.Crit("new jwt middleware panic", "error", err)
	}
	if errInit := jwtMiddleware.MiddlewareInit(); errInit != nil {
		log.Crit("init jwt middleware panic", "error", errInit)
	}
	return jwtMiddleware
}

View File

@@ -7,12 +7,13 @@ import (
"fmt"
"time"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
const defaultBatchHeaderVersion = 0
@@ -273,3 +274,28 @@ func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *messa
}
return nil
}
// UpdateUnassignedBatchReturning atomically flips up to `limit` of the oldest
// unassigned, chunk-proof-ready batches to ProvingTaskAssigned and returns the
// updated rows. The RETURNING clause requires a database that supports it
// (e.g. PostgreSQL). Returns (nil, nil) when limit is zero.
func (o *Batch) UpdateUnassignedBatchReturning(ctx context.Context, limit int) ([]*Batch, error) {
	if limit < 0 {
		return nil, errors.New("limit must not be smaller than zero")
	}
	if limit == 0 {
		return nil, nil
	}

	db := o.db.WithContext(ctx)
	// Subquery: indices of the oldest eligible batches, capped at `limit`.
	subQueryDB := db.Model(&Batch{}).Select("index")
	subQueryDB = subQueryDB.Where("proving_status = ? AND chunk_proofs_status = ?", types.ProvingTaskUnassigned, types.ChunkProofsStatusReady)
	subQueryDB = subQueryDB.Order("index ASC")
	subQueryDB = subQueryDB.Limit(limit)

	var batches []*Batch
	// clause.Returning fills `batches` with the rows the UPDATE touched.
	db = db.Model(&batches).Clauses(clause.Returning{})
	// NOTE(review): "index = (?)" compares against the subquery with `=`; this
	// only works for limit == 1 (the sole caller). Confirm before raising limit.
	db = db.Where("index = (?)", subQueryDB)
	if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
		return nil, fmt.Errorf("Batch.UpdateUnassignedBatchReturning error: %w", err)
	}
	return batches, nil
}

View File

@@ -0,0 +1,57 @@
package orm
import (
"context"
"fmt"
"time"
"gorm.io/gorm"
)
// Challenge persists every challenge string handed out to prover clients so
// that a challenge can only be consumed once (login replay protection).
type Challenge struct {
	db *gorm.DB `gorm:"column:-"`

	ID        int64  `json:"id" gorm:"column:id"`
	Challenge string `json:"challenge" gorm:"column:challenge"`

	// metadata
	CreatedAt time.Time      `json:"created_at" gorm:"column:created_at"`
	UpdatedAt time.Time      `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at"`
}
// NewChallenge creates a new Challenge ORM instance bound to the given DB.
func NewChallenge(db *gorm.DB) *Challenge {
	return &Challenge{db: db}
}
// TableName returns the name of the "challenge" table.
func (r *Challenge) TableName() string {
	return "challenge"
}
// InsertChallenge inserts the challenge string if it has never been stored
// before. If it already exists the insert is skipped and an error is returned,
// which is how replayed login challenges get rejected.
func (r *Challenge) InsertChallenge(ctx context.Context, challengeString string) error {
	challenge := Challenge{
		Challenge: challengeString,
	}
	db := r.db.WithContext(ctx)
	db = db.Model(&Challenge{})
	db = db.Where("challenge = ?", challengeString)
	// FirstOrCreate: RowsAffected == 1 means a fresh row was inserted;
	// RowsAffected == 0 means a matching row already existed.
	result := db.FirstOrCreate(&challenge)
	if result.Error != nil {
		return result.Error
	}
	if result.RowsAffected == 1 {
		return nil
	}
	if result.RowsAffected == 0 {
		return fmt.Errorf("the challenge string:%s have been used", challengeString)
	}
	// FirstOrCreate should never touch more than one row; treat anything else
	// as an inconsistency.
	return fmt.Errorf("insert challenge string affected rows more than 1")
}

View File

@@ -7,11 +7,12 @@ import (
"fmt"
"time"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
)
// Chunk represents a chunk of blocks in the database.
@@ -119,7 +120,7 @@ func (o *Chunk) GetProofsByBatchHash(ctx context.Context, batchHash string) ([]*
for _, chunk := range chunks {
var proof message.ChunkProof
if err := json.Unmarshal(chunk.Proof, &proof); err != nil {
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash error: %w, batch hash: %v, chunk hash: %v", err, batchHash, chunk.Hash)
return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", err, batchHash, chunk.Hash)
}
proofs = append(proofs, &proof)
}
@@ -339,3 +340,29 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
}
return nil
}
// UpdateUnassignedChunkReturning atomically flips up to `limit` of the oldest
// unassigned chunks whose end_block_number <= height to ProvingTaskAssigned
// and returns the updated rows. The RETURNING clause requires a database that
// supports it (e.g. PostgreSQL). Returns (nil, nil) when limit is zero.
func (o *Chunk) UpdateUnassignedChunkReturning(ctx context.Context, height, limit int) ([]*Chunk, error) {
	if limit < 0 {
		return nil, errors.New("limit must not be smaller than zero")
	}
	if limit == 0 {
		return nil, nil
	}

	db := o.db.WithContext(ctx)
	// Subquery: indices of the oldest eligible chunks within the prover's height.
	subQueryDB := db.Model(&Chunk{}).Select("index")
	subQueryDB = subQueryDB.Where("proving_status = ?", types.ProvingTaskUnassigned)
	subQueryDB = subQueryDB.Where("end_block_number <= ?", height)
	subQueryDB = subQueryDB.Order("index ASC")
	subQueryDB = subQueryDB.Limit(limit)

	var chunks []*Chunk
	// clause.Returning fills `chunks` with the rows the UPDATE touched.
	db = db.Model(&chunks).Clauses(clause.Returning{})
	db = db.Where("index = (?)", subQueryDB)
	if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
		// error used to be mis-attributed to UpdateUnassignedBatchReturning
		return nil, fmt.Errorf("Chunk.UpdateUnassignedChunkReturning error: %w", err)
	}
	return chunks, nil
}

View File

@@ -135,3 +135,18 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlo
}
return nil
}
// UpdateChunkHashInRange updates the chunk hash for l2 blocks whose number is
// within the closed range [startNumber, endNumber].
// for unit test
func (o *L2Block) UpdateChunkHashInRange(ctx context.Context, startNumber uint64, endNumber uint64, chunkHash string) error {
	query := o.db.WithContext(ctx).
		Model(&L2Block{}).
		Where("number >= ? AND number <= ?", startNumber, endNumber)
	if err := query.Update("chunk_hash", chunkHash).Error; err != nil {
		return fmt.Errorf("L2Block.UpdateChunkHashInRange error: %w, start number: %v, end number: %v, chunk hash: %v",
			err, startNumber, endNumber, chunkHash)
	}
	return nil
}

View File

@@ -0,0 +1,33 @@
package route
import (
"github.com/gin-gonic/gin"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/middleware"
)
// Route registers all coordinator HTTP routes on the given gin engine under
// the "coordinator" prefix.
func Route(router *gin.Engine, cfg *config.Config) {
	coordinatorGroup := router.Group("coordinator")
	v1(coordinatorGroup, cfg)
}
// v1 registers the /v1 API routes. Registration order matters: /challenge and
// /login are public; every route registered after r.Use(loginMiddleware)
// requires a valid login JWT.
func v1(router *gin.RouterGroup, conf *config.Config) {
	r := router.Group("/v1")

	challengeMiddleware := middleware.ChallengeMiddleware(conf)
	// /challenge issues a short-lived challenge token (anti-replay nonce).
	r.GET("/challenge", challengeMiddleware.LoginHandler)

	loginMiddleware := middleware.LoginMiddleware(conf)
	// /login must present a valid challenge token, then exchanges the signed
	// login message for a login token.
	r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)

	// need jwt token api
	r.Use(loginMiddleware.MiddlewareFunc())
	{
		r.GET("/healthz", api.HealthCheck.HealthCheck)
		r.POST("/get_task", api.GetTask.GetTasks)
		r.POST("/submit_proof", api.SubmitProof.SubmitProof)
	}
}

View File

@@ -0,0 +1,31 @@
package types
import "time"
const (
	// PublicKey is the gin-context key under which the authenticated prover's
	// public key is stored.
	PublicKey = "public_key"
	// ProverName is the gin-context key for the authenticated prover's name.
	ProverName = "prover_name"
	// ProverVersion is the gin-context key for the prover's reported version.
	ProverVersion = "prover_version"
)
// Message is the signed portion of the login request: the server-issued
// challenge plus the prover's self-reported version and name.
type Message struct {
	// Challenge is the one-time challenge string previously issued by /challenge.
	Challenge string `form:"challenge" json:"challenge" binding:"required"`
	// ProverVersion is the prover's reported version string.
	ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
	// ProverName is the prover's reported display name.
	ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
}
// LoginParameter is the request body for the /login api: the message and the
// prover's signature over it.
type LoginParameter struct {
	Message   Message `form:"message" json:"message" binding:"required"`
	Signature string  `form:"signature" json:"signature" binding:"required"`
}
// LoginSchema is the /login (and /challenge) response payload: the issued JWT
// token and its expiry time.
type LoginSchema struct {
	Time  time.Time `json:"time"`
	Token string    `json:"token"`
}

View File

@@ -0,0 +1,14 @@
package types
// GetTaskParameter is the request body for the get_task api.
type GetTaskParameter struct {
	// ProverHeight is the highest L2 block number the prover can handle; chunk
	// tasks ending above it are not assigned.
	// NOTE(review): binding:"required" rejects a zero height — confirm provers
	// always send a positive value.
	ProverHeight int `form:"prover_height" json:"prover_height" binding:"required"`
	// TaskType optionally selects the task kind; not required by binding.
	TaskType int `form:"task_type" json:"task_type"`
}
// GetTaskSchema is the payload returned to a prover from get_task: the task
// id, its type (chunk or batch), and the JSON-encoded task detail.
type GetTaskSchema struct {
	TaskID   string `json:"task_id"`
	TaskType int    `json:"task_type"`
	// TaskData is the JSON-serialized task detail (block hashes for chunks,
	// chunk infos/proofs for batches).
	TaskData string `json:"task_data"`
}

View File

@@ -0,0 +1,28 @@
package types
import (
"net/http"
"github.com/gin-gonic/gin"
)
// Response is the uniform JSON envelope for all coordinator API responses.
type Response struct {
	// ErrCode is 0 on success, otherwise a coordinator/jwt error code.
	ErrCode int `json:"errcode"`
	// ErrMsg carries the error text; empty on success.
	ErrMsg string `json:"errmsg"`
	// Data is the endpoint-specific payload.
	Data interface{} `json:"data"`
}
// RenderJSON writes the uniform Response envelope as JSON. The HTTP status is
// always 200; failures are signaled through errCode and the error's message.
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
	resp := Response{
		ErrCode: errCode,
		Data:    data,
	}
	if err != nil {
		resp.ErrMsg = err.Error()
	}
	ctx.JSON(http.StatusOK, resp)
}

View File

@@ -0,0 +1,9 @@
package types
// SubmitProofParameter is the request body for the submit_proof api.
type SubmitProofParameter struct {
	// TaskID identifies the chunk/batch task the proof belongs to.
	TaskID string `form:"task_id" json:"task_id" binding:"required"`
	// TaskType distinguishes chunk and batch proofs.
	// NOTE(review): binding:"required" rejects a zero value — confirm no valid
	// proof type is encoded as 0.
	TaskType int `form:"task_type" json:"task_type" binding:"required"`
	// Status is the prover-reported proving status.
	Status int `form:"status" json:"status"`
	// Proof is the serialized proof payload.
	Proof string `form:"proof" json:"proof"`
}

View File

@@ -1,22 +1,20 @@
package test
import (
"compress/flate"
"context"
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"math/big"
"net/http"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
"gorm.io/gorm"
"scroll-tech/database/migrate"
@@ -25,18 +23,17 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/coordinator/client"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/logic/provermanager"
"scroll-tech/coordinator/internal/orm"
"scroll-tech/coordinator/internal/route"
)
var (
dbCfg *database.Config
conf *config.Config
base *docker.App
@@ -48,6 +45,8 @@ var (
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
chunk *types.Chunk
tokenTimeout int
)
func TestMain(m *testing.M) {
@@ -61,33 +60,48 @@ func randomURL() string {
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
func setupCoordinator(t *testing.T, proversPerSession uint8, wsURL string, resetDB bool) (*http.Server, *cron.Collector) {
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string) (*cron.Collector, *http.Server) {
var err error
db, err = database.InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
if resetDB {
assert.NoError(t, migrate.ResetDB(sqlDB))
}
assert.NoError(t, migrate.ResetDB(sqlDB))
conf := config.Config{
L2Config: &config.L2Config{ChainID: 111},
ProverManagerConfig: &config.ProverManagerConfig{
tokenTimeout = 6
conf = &config.Config{
L2: &config.L2{
ChainID: 111,
},
ProverManager: &config.ProverManager{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 5,
CollectionTimeSec: 10,
MaxVerifierWorkers: 10,
SessionAttempts: 5,
},
Auth: &config.Auth{
ChallengeExpireDurationSec: tokenTimeout,
LoginExpireDurationSec: tokenTimeout,
},
}
proofCollector := cron.NewCollector(context.Background(), db, &conf)
tmpAPI := api.RegisterAPIs(&conf, db)
handler, _, err := utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], tmpAPI, flate.NoCompression)
assert.NoError(t, err)
provermanager.InitProverManager(db)
return handler, proofCollector
proofCollector := cron.NewCollector(context.Background(), db, conf)
router := gin.Default()
api.InitController(conf, db)
route.Route(router, conf)
srv := &http.Server{
Addr: coordinatorURL,
Handler: router,
}
go func() {
runErr := srv.ListenAndServe()
if runErr != nil && !errors.Is(runErr, http.ErrServerClosed) {
assert.NoError(t, runErr)
}
}()
return proofCollector, srv
}
func setEnv(t *testing.T) {
@@ -112,13 +126,13 @@ func setEnv(t *testing.T) {
chunkOrm = orm.NewChunk(db)
l2BlockOrm = orm.NewL2Block(db)
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock1 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock1)
assert.NoError(t, err)
templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
templateBlockTrace, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock2 = &types.WrappedBlock{}
err = json.Unmarshal(templateBlockTrace, wrappedBlock2)
@@ -135,13 +149,10 @@ func TestApis(t *testing.T) {
t.Run("TestHandshake", testHandshake)
t.Run("TestFailedHandshake", testFailedHandshake)
t.Run("TestSeveralConnections", testSeveralConnections)
t.Run("TestValidProof", testValidProof)
t.Run("TestInvalidProof", testInvalidProof)
t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
t.Run("TestTimeoutProof", testTimeoutProof)
t.Run("TestIdleProverSelection", testIdleProverSelection)
t.Run("TestGracefulRestart", testGracefulRestart)
// Teardown
t.Cleanup(func() {
@@ -150,141 +161,64 @@ func TestApis(t *testing.T) {
}
func testHandshake(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
handler, proofCollector := setupCoordinator(t, 1, wsURL, true)
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
handler.Shutdown(context.Background())
proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
prover1 := newMockProver(t, "prover_test", wsURL, message.ProofTypeChunk)
defer prover1.close()
prover2 := newMockProver(t, "prover_test", wsURL, message.ProofTypeBatch)
defer prover2.close()
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
chunkProver := newMockProver(t, "prover_chunk_test", coordinatorURL, message.ProofTypeChunk)
token := chunkProver.connectToCoordinator(t)
assert.NotEmpty(t, token)
assert.True(t, chunkProver.healthCheck(t, token, types.Success))
}
func testFailedHandshake(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
handler, proofCollector := setupCoordinator(t, 1, wsURL, true)
// Setup coordinator and http server.
coordinatorURL := randomURL()
proofCollector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
handler.Shutdown(context.Background())
proofCollector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
// prepare
name := "prover_test"
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Try to perform handshake without token
// create a new ws connection
c, err := client.DialContext(ctx, wsURL)
assert.NoError(t, err)
// create private key
privkey, err := crypto.GenerateKey()
assert.NoError(t, err)
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: name,
},
}
assert.NoError(t, authMsg.SignWithKey(privkey))
_, err = c.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)
chunkProver := newMockProver(t, "prover_chunk_test", coordinatorURL, message.ProofTypeChunk)
token := chunkProver.connectToCoordinator(t)
assert.NotEmpty(t, token)
assert.True(t, chunkProver.healthCheck(t, token, types.Success))
// Try to perform handshake with timeouted token
// create a new ws connection
c, err = client.DialContext(ctx, wsURL)
assert.NoError(t, err)
// create private key
privkey, err = crypto.GenerateKey()
assert.NoError(t, err)
authMsg = &message.AuthMsg{
Identity: &message.Identity{
Name: name,
},
}
assert.NoError(t, authMsg.SignWithKey(privkey))
token, err := c.RequestToken(ctx, authMsg)
assert.NoError(t, err)
authMsg.Identity.Token = token
assert.NoError(t, authMsg.SignWithKey(privkey))
<-time.After(6 * time.Second)
_, err = c.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)
assert.Equal(t, 0, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
}
func testSeveralConnections(t *testing.T) {
wsURL := "ws://" + randomURL()
handler, proofCollector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
proofCollector.Stop()
}()
var (
batch = 200
eg = errgroup.Group{}
provers = make([]*mockProver, batch)
)
for i := 0; i < batch; i += 2 {
idx := i
eg.Go(func() error {
provers[idx] = newMockProver(t, "prover_test_"+strconv.Itoa(idx), wsURL, message.ProofTypeChunk)
provers[idx+1] = newMockProver(t, "prover_test_"+strconv.Itoa(idx+1), wsURL, message.ProofTypeBatch)
return nil
})
}
assert.NoError(t, eg.Wait())
// check prover's idle connections
assert.Equal(t, batch/2, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, batch/2, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
// close connection
for _, prover := range provers {
prover.close()
}
var (
tick = time.Tick(time.Second)
tickStop = time.Tick(time.Minute)
)
for {
select {
case <-tick:
if provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk) == 0 {
return
}
case <-tickStop:
t.Error("prover connect is blocked")
return
}
}
batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch)
token = chunkProver.connectToCoordinator(t)
assert.NotEmpty(t, token)
<-time.After(time.Duration(tokenTimeout+1) * time.Second)
assert.True(t, batchProver.healthCheck(t, token, types.ErrJWTTokenExpired))
}
func testValidProof(t *testing.T) {
wsURL := "ws://" + randomURL()
handler, collector := setupCoordinator(t, 3, wsURL, true)
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
handler.Shutdown(context.Background())
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
// create mock provers.
provers := make([]*mockProver, 6)
provers := make([]*mockProver, 2)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
@@ -292,34 +226,18 @@ func testValidProof(t *testing.T) {
} else {
proofType = message.ProofTypeBatch
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), wsURL, proofType)
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType)
// only prover 0 & 1 submit valid proofs.
proofStatus := generatedFailed
if i <= 1 {
proofStatus = verifiedSuccess
}
provers[i].waitTaskAndSendProof(t, time.Second, false, proofStatus)
proverTask := provers[i].getProverTask(t, proofType)
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, proofStatus)
}
defer func() {
// close connection
for _, prover := range provers {
prover.close()
}
}()
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
// verify proof status
var (
tick = time.Tick(1500 * time.Millisecond)
@@ -348,15 +266,26 @@ func testValidProof(t *testing.T) {
func testInvalidProof(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
handler, collector := setupCoordinator(t, 3, wsURL, true)
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
handler.Shutdown(context.Background())
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// create mock provers.
provers := make([]*mockProver, 6)
provers := make([]*mockProver, 2)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
@@ -364,26 +293,11 @@ func testInvalidProof(t *testing.T) {
} else {
proofType = message.ProofTypeBatch
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), wsURL, proofType)
provers[i].waitTaskAndSendProof(t, time.Second, false, verifiedFailed)
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType)
proverTask := provers[i].getProverTask(t, proofType)
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, verifiedFailed)
}
defer func() {
// close connection
for _, prover := range provers {
prover.close()
}
}()
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// verify proof status
var (
@@ -413,15 +327,26 @@ func testInvalidProof(t *testing.T) {
func testProofGeneratedFailed(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
handler, collector := setupCoordinator(t, 3, wsURL, true)
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 3, coordinatorURL)
defer func() {
handler.Shutdown(context.Background())
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// create mock provers.
provers := make([]*mockProver, 6)
provers := make([]*mockProver, 2)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
@@ -429,26 +354,11 @@ func testProofGeneratedFailed(t *testing.T) {
} else {
proofType = message.ProofTypeBatch
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), wsURL, proofType)
provers[i].waitTaskAndSendProof(t, time.Second, false, generatedFailed)
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType)
proverTask := provers[i].getProverTask(t, proofType)
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, generatedFailed)
}
defer func() {
// close connection
for _, prover := range provers {
prover.close()
}
}()
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 3, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// verify proof status
var (
@@ -478,226 +388,62 @@ func testProofGeneratedFailed(t *testing.T) {
func testTimeoutProof(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
handler, collector := setupCoordinator(t, 1, wsURL, true)
coordinatorURL := randomURL()
collector, httpHandler := setupCoordinator(t, 1, coordinatorURL)
defer func() {
handler.Shutdown(context.Background())
collector.Stop()
assert.NoError(t, httpHandler.Shutdown(context.Background()))
}()
// create first chunk & batch mock prover, that will not send any proof.
chunkProver1 := newMockProver(t, "prover_test"+strconv.Itoa(0), wsURL, message.ProofTypeChunk)
batchProver1 := newMockProver(t, "prover_test"+strconv.Itoa(1), wsURL, message.ProofTypeBatch)
defer func() {
// close connection
chunkProver1.close()
batchProver1.close()
}()
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// verify proof status, it should be assigned, because prover didn't send any proof
var chunkProofStatus types.ProvingStatus
var batchProofStatus types.ProvingStatus
// create first chunk & batch mock prover, that will not send any proof.
chunkProver1 := newMockProver(t, "prover_test"+strconv.Itoa(0), coordinatorURL, message.ProofTypeChunk)
proverChunkTask := chunkProver1.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, proverChunkTask)
ok := utils.TryTimes(30, func() bool {
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
if err != nil {
return false
}
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
if err != nil {
return false
}
return chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned
})
assert.Falsef(t, !ok, "failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
batchProver1 := newMockProver(t, "prover_test"+strconv.Itoa(1), coordinatorURL, message.ProofTypeBatch)
proverBatchTask := batchProver1.getProverTask(t, message.ProofTypeBatch)
assert.NotNil(t, proverBatchTask)
// verify proof status, it should be assigned, because prover didn't send any proof
chunkProofStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, chunkProofStatus, types.ProvingTaskAssigned)
batchProofStatus, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, batchProofStatus, types.ProvingTaskAssigned)
// wait coordinator to reset the prover task proving status
time.Sleep(time.Duration(conf.ProverManager.CollectionTimeSec*2) * time.Second)
// create second mock prover, that will send valid proof.
chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), wsURL, message.ProofTypeChunk)
chunkProver2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), wsURL, message.ProofTypeBatch)
batchProver2.waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
defer func() {
// close connection
chunkProver2.close()
batchProver2.close()
}()
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, 1, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
chunkProver2 := newMockProver(t, "prover_test"+strconv.Itoa(2), coordinatorURL, message.ProofTypeChunk)
proverChunkTask2 := chunkProver2.getProverTask(t, message.ProofTypeChunk)
assert.NotNil(t, proverChunkTask2)
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess)
batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch)
proverBatchTask2 := batchProver2.getProverTask(t, message.ProofTypeBatch)
assert.NotNil(t, proverBatchTask2)
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess)
// verify proof status, it should be verified now, because second prover sent valid proof
ok = utils.TryTimes(200, func() bool {
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
if err != nil {
return false
}
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
if err != nil {
return false
}
return chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified
})
assert.Falsef(t, !ok, "failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
}
func testIdleProverSelection(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
handler, collector := setupCoordinator(t, 1, wsURL, true)
defer func() {
handler.Shutdown(context.Background())
collector.Stop()
}()
// create mock provers.
provers := make([]*mockProver, 20)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), wsURL, proofType)
provers[i].waitTaskAndSendProof(t, time.Second, false, verifiedSuccess)
}
defer func() {
// close connection
for _, prover := range provers {
prover.close()
}
}()
assert.Equal(t, len(provers)/2, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeChunk))
assert.Equal(t, len(provers)/2, provermanager.Manager.GetNumberOfIdleProvers(message.ProofTypeBatch))
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
// verify proof status
var (
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
var chunkProofStatus types.ProvingStatus
var batchProofStatus types.ProvingStatus
for {
select {
case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified {
return
}
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
return
}
}
}
func testGracefulRestart(t *testing.T) {
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
handler, collector := setupCoordinator(t, 1, wsURL, true)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
// create mock prover
chunkProver := newMockProver(t, "prover_test", wsURL, message.ProofTypeChunk)
batchProver := newMockProver(t, "prover_test", wsURL, message.ProofTypeBatch)
// wait 10 seconds, coordinator restarts before prover submits proof
chunkProver.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
batchProver.waitTaskAndSendProof(t, 10*time.Second, false, verifiedSuccess)
// wait for coordinator to dispatch task
<-time.After(5 * time.Second)
// the coordinator will delete the prover if the subscription is closed.
chunkProver.close()
batchProver.close()
provingStatus, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned, provingStatus)
// Close proverManager and ws handler.
handler.Shutdown(context.Background())
collector.Stop()
// Setup new coordinator and ws server.
newHandler, newCollector := setupCoordinator(t, 1, wsURL, false)
defer func() {
newHandler.Shutdown(context.Background())
newCollector.Stop()
}()
// at this point, prover haven't submitted
status, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskAssigned, status)
status, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, types.ProvingTaskUnassigned, status) // chunk proofs not ready yet
// will overwrite the prover client for `SubmitProof`
chunkProver.waitTaskAndSendProof(t, time.Second, true, verifiedSuccess)
batchProver.waitTaskAndSendProof(t, time.Second, true, verifiedSuccess)
defer func() {
chunkProver.close()
batchProver.close()
}()
// verify proof status
var (
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(15 * time.Second)
)
var chunkProofStatus types.ProvingStatus
var batchProofStatus types.ProvingStatus
for {
select {
case <-tick:
// this proves that the prover submits to the new coordinator,
// because the prover client for `submitProof` has been overwritten
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskVerified && batchProofStatus == types.ProvingTaskVerified {
return
}
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
return
}
}
chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, chunkProofStatus2, types.ProvingTaskVerified)
batchProofStatus2, err := batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, batchProofStatus2, types.ProvingTaskVerified)
}

View File

@@ -1,20 +1,22 @@
package test
import (
"context"
"crypto/ecdsa"
"sync"
"encoding/json"
"fmt"
"net/http"
"testing"
"time"
"github.com/scroll-tech/go-ethereum"
"github.com/go-resty/resty/v2"
"github.com/mitchellh/mapstructure"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
ctypes "scroll-tech/common/types"
"scroll-tech/common/types/message"
client2 "scroll-tech/coordinator/client"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/types"
)
type proofStatus uint32
@@ -26,133 +28,183 @@ const (
)
type mockProver struct {
proverName string
privKey *ecdsa.PrivateKey
proofType message.ProofType
wsURL string
client *client2.Client
taskCh chan *message.TaskMsg
taskCache sync.Map
sub ethereum.Subscription
stopCh chan struct{}
proverName string
privKey *ecdsa.PrivateKey
proofType message.ProofType
coordinatorURL string
}
func newMockProver(t *testing.T, proverName string, wsURL string, proofType message.ProofType) *mockProver {
func newMockProver(t *testing.T, proverName string, coordinatorURL string, proofType message.ProofType) *mockProver {
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
prover := &mockProver{
proverName: proverName,
privKey: privKey,
proofType: proofType,
wsURL: wsURL,
taskCh: make(chan *message.TaskMsg, 4),
stopCh: make(chan struct{}),
proverName: proverName,
privKey: privKey,
proofType: proofType,
coordinatorURL: coordinatorURL,
}
prover.client, prover.sub, err = prover.connectToCoordinator()
assert.NoError(t, err)
return prover
}
// connectToCoordinator sets up a websocket client to connect to the prover manager.
func (r *mockProver) connectToCoordinator() (*client2.Client, ethereum.Subscription, error) {
// Create connection.
client, err := client2.Dial(r.wsURL)
if err != nil {
return nil, nil, err
}
func (r *mockProver) connectToCoordinator(t *testing.T) string {
challengeString := r.challenge(t)
return r.login(t, challengeString)
}
// create a new ws connection
authMsg := &message.AuthMsg{
func (r *mockProver) challenge(t *testing.T) string {
var result types.Response
client := resty.New()
resp, err := client.R().
SetResult(&result).
Get("http://" + r.coordinatorURL + "/coordinator/v1/challenge")
assert.NoError(t, err)
type login struct {
Time string `json:"time"`
Token string `json:"token"`
}
var loginData login
err = mapstructure.Decode(result.Data, &loginData)
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode())
assert.Empty(t, result.ErrMsg)
return loginData.Token
}
func (r *mockProver) login(t *testing.T, challengeString string) string {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Name: r.proverName,
ProverType: r.proofType,
Challenge: challengeString,
ProverName: "test",
ProverVersion: "v1.0.0",
},
}
_ = authMsg.SignWithKey(r.privKey)
assert.NoError(t, authMsg.SignWithKey(r.privKey))
token, err := client.RequestToken(context.Background(), authMsg)
if err != nil {
return nil, nil, err
body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
var result types.Response
client := resty.New()
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", challengeString)).
SetBody([]byte(body)).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/login")
assert.NoError(t, err)
type login struct {
Time string `json:"time"`
Token string `json:"token"`
}
authMsg.Identity.Token = token
_ = authMsg.SignWithKey(r.privKey)
var loginData login
err = mapstructure.Decode(result.Data, &loginData)
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode())
assert.Empty(t, result.ErrMsg)
return loginData.Token
}
sub, err := client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
if err != nil {
return nil, nil, err
func (r *mockProver) healthCheck(t *testing.T, token string, errCode int) bool {
var result types.Response
client := resty.New()
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetResult(&result).
Get("http://" + r.coordinatorURL + "/coordinator/v1/healthz")
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode())
assert.Equal(t, errCode, result.ErrCode)
return true
}
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) *types.GetTaskSchema {
// get task from coordinator
token := r.connectToCoordinator(t)
assert.NotEmpty(t, token)
type response struct {
ErrCode int `json:"errcode"`
ErrMsg string `json:"errmsg"`
Data types.GetTaskSchema `json:"data"`
}
return client, sub, nil
var result response
client := resty.New()
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode())
assert.Equal(t, ctypes.Success, result.ErrCode)
assert.NotEmpty(t, result.Data.TaskID)
assert.NotEmpty(t, result.Data.TaskType)
assert.NotEmpty(t, result.Data.TaskData)
return &result.Data
}
func (r *mockProver) releaseTasks() {
r.taskCache.Range(func(key, value any) bool {
r.taskCh <- value.(*message.TaskMsg)
r.taskCache.Delete(key)
return true
})
}
// Wait for the proof task, after receiving the proof task, prover submits proof after proofTime secs.
func (r *mockProver) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, proofStatus proofStatus) {
// simulating the case that the prover first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeProver()`
if reconnect {
var err error
r.client, r.sub, err = r.connectToCoordinator()
if err != nil {
t.Fatal(err)
return
}
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus) {
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: proverTaskSchema.TaskID,
Type: message.ProofType(proverTaskSchema.TaskType),
Status: message.RespStatus(proofStatus),
ChunkProof: &message.ChunkProof{},
BatchProof: &message.BatchProof{},
},
}
// Release cached tasks.
r.releaseTasks()
r.stopCh = make(chan struct{})
go r.loop(t, r.client, proofTime, proofStatus, r.stopCh)
}
func (r *mockProver) loop(t *testing.T, client *client2.Client, proofTime time.Duration, proofStatus proofStatus, stopCh chan struct{}) {
for {
select {
case task := <-r.taskCh:
r.taskCache.Store(task.ID, task)
// simulate proof time
select {
case <-time.After(proofTime):
case <-stopCh:
return
}
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: task.ID,
Type: r.proofType,
Status: message.StatusOk,
ChunkProof: &message.ChunkProof{},
BatchProof: &message.BatchProof{},
},
}
if proofStatus == generatedFailed {
proof.Status = message.StatusProofError
} else if proofStatus == verifiedFailed {
proof.ProofDetail.ChunkProof.Proof = []byte(verifier.InvalidTestProof)
proof.ProofDetail.BatchProof.Proof = []byte(verifier.InvalidTestProof)
}
assert.NoError(t, proof.Sign(r.privKey))
assert.NoError(t, client.SubmitProof(context.Background(), proof))
case <-stopCh:
return
}
if proofStatus == generatedFailed {
proof.Status = message.StatusProofError
} else if proofStatus == verifiedFailed {
proof.ProofDetail.ChunkProof.Proof = []byte(verifier.InvalidTestProof)
proof.ProofDetail.BatchProof.Proof = []byte(verifier.InvalidTestProof)
}
}
// close signals the mock prover's background loop to stop and tears down
// its task subscription.
//
// NOTE(review): the original rendering had the body of submitProof spliced
// into close() (referencing undefined `t` and `proof` — a compile error);
// the two methods have been separated below. The submitProof signature is
// reconstructed from its body — confirm against the original file.
func (r *mockProver) close() {
	close(r.stopCh)
	r.sub.Unsubscribe()
}

// submitProof signs the given proof with the prover's key, serializes the
// chunk or batch proof payload, authenticates against the coordinator, and
// POSTs the result to the /coordinator/v1/submit_proof endpoint, asserting
// an HTTP 200 response with a Success error code.
func (r *mockProver) submitProof(t *testing.T, proof *message.ProofMsg) {
	assert.NoError(t, proof.Sign(r.privKey))
	submitProof := types.SubmitProofParameter{
		TaskID:   proof.ID,
		TaskType: int(proof.Type),
		Status:   int(proof.Status),
	}
	// Serialize whichever proof payload matches the task type.
	switch proof.Type {
	case message.ProofTypeChunk:
		encodeData, err := json.Marshal(proof.ChunkProof)
		assert.NoError(t, err)
		assert.NotEmpty(t, encodeData)
		submitProof.Proof = string(encodeData)
	case message.ProofTypeBatch:
		encodeData, err := json.Marshal(proof.BatchProof)
		assert.NoError(t, err)
		assert.NotEmpty(t, encodeData)
		submitProof.Proof = string(encodeData)
	}
	// Fresh login to obtain a JWT for the submission request.
	token := r.connectToCoordinator(t)
	assert.NotEmpty(t, token)
	submitProofData, err := json.Marshal(submitProof)
	assert.NoError(t, err)
	assert.NotNil(t, submitProofData)
	var result types.Response
	client := resty.New()
	resp, err := client.R().
		SetHeader("Content-Type", "application/json").
		SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
		SetBody(string(submitProofData)).
		SetResult(&result).
		Post("http://" + r.coordinatorURL + "/coordinator/v1/submit_proof")
	assert.NoError(t, err)
	assert.Equal(t, http.StatusOK, resp.StatusCode())
	assert.Equal(t, ctypes.Success, result.ErrCode)
}

View File

@@ -1,545 +0,0 @@
{
"withdrawTrieRoot": "0x0000000000000000000000000000000000000000",
"coinbase": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"header": {
"parentHash": "0xe17f08d25ef61a8ee12aa29704b901345a597f5e45a9a0f603ae0f70845b54dc",
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"miner": "0x0000000000000000000000000000000000000000",
"stateRoot": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"transactionsRoot": "0x3057754c197f33e1fe799e996db6232b5257412feea05b3c1754738f0b33fe32",
"receiptsRoot": "0xd95b673818fa493deec414e01e610d97ee287c9421c8eff4102b1647c1a184e4",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"difficulty": "0x2",
"number": "0x2",
"gasLimit": "0x355418d1e8184",
"gasUsed": "0xa410",
"timestamp": "0x63807b2a",
"extraData": "0xd983010a0d846765746889676f312e31372e3133856c696e75780000000000004b54a94f0df14333e63c8a13dfe6097c1a08b5fd2c225a8dc0f199dae245aead55d6f774a980a0c925be407748d56a14106afda7ddc1dec342e7ee3b0d58a8df01",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"nonce": "0x0000000000000000",
"baseFeePerGas": "0x1de9",
"hash": "0xc7b6c7022c8386cdaf6fcd3d4f8d03dce257ae3664a072fdce511ecefce73ad0"
},
"transactions": [
{
"type": 0,
"nonce": 0,
"txHash": "0xb2febc1213baec968f6575789108e175273b8da8f412468098893084229f1542",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514",
"s": "0x34cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b"
},
{
"type": 0,
"nonce": 1,
"txHash": "0xe6ac2ffc543d07f1e280912a2abe3aa659bf83773740681151297ada1bb211dd",
"gas": 500000,
"gasPrice": "0x3b9aec2e",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"to": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"chainId": "0xcf55",
"value": "0x152d02c7e14af6000000",
"data": "0x",
"isCreate": false,
"v": "0x19ece",
"r": "0xf039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316",
"s": "0x5a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1"
}
],
"storageTrace": {
"rootBefore": "0x2579122e8f9ec1e862e7d415cef2fb495d7698a8e5f0dddc5651ba4236336e7d",
"rootAfter": "0x25b792bfd6d6456451f996e9383225e026fff469da205bb916768c0a78fd16af",
"proofs": {
"0x01bae6BF68E9A03Fb2bc0615b1bf0d69ce9411eD": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0x1C5A77d9FA7eF466951B2F01F724BCa3A5820b63": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
],
"0xc0c4C8bAEA3f6Acb49b6E1fb9e2ADEcEeaCB0cA2": [
"0x01204920151d7e3cd9d1b5ba09d3ad6ea157c82d1cc425731f209e71a007165a9c0404000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4700000000000000000000000000000000000000000000000000000000000000000201c5a77d9fa7ef466951b2f01f724bca3a5820b63000000000000000000000000",
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449"
]
}
},
"executionResults": [
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
},
{
"gas": 21000,
"failed": false,
"returnValue": "",
"from": {
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 1,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffead2fd381eb5006a6eb8",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
"to": {
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x0",
"codeHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"accountAfter": [
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"structLogs": []
}
],
"mptwitness": [
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x7d6e333642ba5156dcddf0e5a898765d49fbf2ce15d4e762e8c19e8f2e127925",
"leaf": {
"value": "0xdf92dc6c0dd1c7fde78079ea62863977463f07e542966c6393f4d8cd6cce3117",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 0,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0xf6b9a9f1e25add11bf5d0705e58f4b7a968b281ec23a8d41e719a0e27d87450c",
"leaf": {
"value": "0x716491d19f5e25dc565d05bbde1f30b343b1489b2d923feb30141d24a87c0a00",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x200000000000000000000000000000000000000000000000000000000000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
null,
null
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x01bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed",
"accountKey": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
"accountKey": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920",
"accountPath": [
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
},
{
"pathPart": "0x0",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0xf199fe1a085b5bb134e90d0bfdaf70579fa703ab3db986a6730b44cfd5207b15",
"sibling": "0x9c5a1607a0719e201f7325c41c2dc857a16eadd309bab5d1d93c7e1d15204920"
}
}
],
"accountUpdate": [
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
},
{
"nonce": 2,
"balance": "0x1ffffffffffffffffffffffffffffffffffffffffffd5a5fa703d6a00d4dd70",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
},
{
"address": "0xc0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca2",
"accountKey": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616",
"accountPath": [
{
"pathPart": "0x1",
"root": "0x06954857b2b6569c7dfe8380f8c7fe72d6b7fefca206b1fe74dc6ffbf97c132e",
"path": [
{
"value": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x7f53dc37d5a264eb72d8ae1a31c82239a385d9f6df23b81c48e97862d6d92314"
}
},
{
"pathPart": "0x3",
"root": "0xaf16fd780a8c7616b95b20da69f4ff26e0253238e996f9516445d6d6bf92b725",
"path": [
{
"value": "0x5bbe97e7e66485b203f9dfea64eb7fa7df06959b12cbde2beba14f8f91133a13",
"sibling": "0x34f20c09876841ab1c180877223cc915ca96589b05ecea552aa2b3b9b47de806"
},
{
"value": "0x2e591357b02ab3117c35ad94a4e1a724fdbd95d6463da1f6c8017e6d000ecf02",
"sibling": "0x0000000000000000000000000000000000000000000000000000000000000000"
},
{
"value": "0x794953bb5d8aa00f90383ff435ce2ea58e30e1da1061e69455c38496766ec10f",
"sibling": "0x1b9da0b70b242af37d53f5bda27315b2dbd178f6b4b1e026be43cab8d46b850b"
}
],
"leaf": {
"value": "0x45c70c4b7345dd1705ed019271dd1d7fbe2a1054ecefaf3fd2a22388a483072e",
"sibling": "0x9b38091c0e341793f0e755a1ea7b64bfb06455aced31334598fcfd02d1d94616"
}
}
],
"accountUpdate": [
null,
{
"nonce": 0,
"balance": "0x152d02c7e14af6000000",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
}
],
"commonStateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
"statePath": [
null,
null
],
"stateUpdate": [
null,
null
]
}
]
}

File diff suppressed because one or more lines are too long

View File

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, 6, int(cur))
assert.Equal(t, 7, int(cur))
}
func testMigrate(t *testing.T) {

View File

@@ -0,0 +1,19 @@
-- +goose Up
-- +goose StatementBegin
-- Stores issued login challenges; the unique constraint prevents a
-- challenge string from being accepted twice (login replay protection).
create table challenge
(
    id         BIGSERIAL PRIMARY KEY,
    challenge  VARCHAR NOT NULL,
    created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at TIMESTAMP(0) DEFAULT NULL,
    CONSTRAINT uk_challenge UNIQUE (challenge)
);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists challenge;
-- +goose StatementEnd

View File

@@ -511,6 +511,7 @@ github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh
github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY=
github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM=
github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs=
github.com/paulmach/orb v0.7.1 h1:Zha++Z5OX/l168sqHK3k4z18LDvr+YAO/VjK0ReQ9rU=
github.com/paulmach/orb v0.7.1/go.mod h1:FWRlTgl88VI1RBx/MkrwWDRhQ96ctqMCh8boXhmqB/A=
@@ -557,19 +558,9 @@ github.com/scroll-tech/go-ethereum v1.10.14-0.20230220082843-ec9254b0b1c6/go.mod
github.com/scroll-tech/go-ethereum v1.10.14-0.20230306131930-03b4de32b78b/go.mod h1:f9ygxrxL7WRCTzuloV+t/UlcxMq3AL+gcNU60liiNNU=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04/go.mod h1:jH8c08L9K8Hieaf0r/ur2P/cpesn4dFhmLm2Mmoi8kI=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230802095950-4b2bbf6225e7/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230803074803-b51736fd3c5a/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230803075611-60e260b2f149/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230803110959-ef67ff8bbba4/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230803144545-f87eb3127ab5/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/scroll-tech/zktrie v0.4.3/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/scroll-tech/zktrie v0.5.2/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU=
github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M=
github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636 h1:aSISeOcal5irEhJd1M+IrApc0PdcN7e7Aj4yuEnOrfQ=
github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
@@ -579,13 +570,9 @@ github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvq
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
@@ -599,17 +586,13 @@ github.com/tdewolff/minify/v2 v2.12.4 h1:kejsHQMM17n6/gwdw53qsi6lg0TGddZADVyQOz1
github.com/tdewolff/minify/v2 v2.12.4/go.mod h1:h+SRvSIX3kwgwTFOpSckvSxgax3uy8kZTSF1Ojrr3bk=
github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZycQ=
github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo=
github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc=
@@ -618,15 +601,11 @@ github.com/valyala/fasthttp v1.40.0 h1:CRq/00MfruPGFLTQKY8b+8SfdK60TxNztjRMnH0t1
github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
github.com/vertica/vertica-sql-go v1.3.2/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
@@ -636,7 +615,6 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwY
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=
github.com/yosssi/ace v0.0.5 h1:tUkIP/BLdKqrlrPwcmH0shwEEhTRHoGnc1wFIWmaBUA=
github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M=
github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=
@@ -645,98 +623,58 @@ github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg=
go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M=
go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
go.opentelemetry.io/otel v1.9.0 h1:8WZNQFIB2a71LnANS9JeyidJKKGOOremcUtb/OtHISw=
go.opentelemetry.io/otel v1.9.0/go.mod h1:np4EoPGzoPs3O67xUVNoPPcmSvsfOxNlNA4F4AC+0Eo=
go.opentelemetry.io/otel v1.15.1/go.mod h1:mHHGEHVDLal6YrKMmk9LqC4a3sF5g+fHfrttQIB1NTc=
go.opentelemetry.io/otel/trace v1.9.0 h1:oZaCNJUjWcg60VXWee8lJKlqhPbXAPB51URuR47pQYc=
go.opentelemetry.io/otel/trace v1.9.0/go.mod h1:2737Q0MuG8q1uILYm2YYVkAyLtOofiTNGg6VODnOiPo=
go.opentelemetry.io/otel/trace v1.15.1/go.mod h1:IWdQG/5N1x7f6YUlmdLeJvH9yxtuJAfc4VW5Agv9r/8=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8=
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220224120231-95c6836cb0e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28=
golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
gonum.org/v1/gonum v0.6.0 h1:DJy6UzXbahnGUf1ujUNkh/NEtK14qMo2nvlBPs4U5yw=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b h1:Qh4dB5D/WpoUUp3lSod7qgoyEHbDGPUWjIbnqdqqe1k=
google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f h1:2wh8dWY8959cBGQvk1RD+/eQBgRYYDaZ+hT0/zsARoA=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 h1:R1r5J0u6Cx+RNl/6mezTw6oA14cmKC96FeUwL6A9bd4=
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@@ -747,7 +685,6 @@ gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v1.4.0 h1:BjtEgfuw8Qyd+jPvQz8CfoxiO/UjFEidWinwEXZiWv0=
@@ -755,28 +692,16 @@ gotest.tools v1.4.0/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.1.3 h1:qTakTkI6ni6LFD5sBwwsdSO+AQqbSIxOauHTTQKZ/7o=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.36.1/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws=
modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
modernc.org/sqlite v1.22.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk=
modernc.org/strutil v1.1.2/go.mod h1:OYajnUAcI/MX+XD/Wx7v1bbdvcQSvxgtb0gC+u3d3eg=
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE=
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=

182
prover/client/client.go Normal file
View File

@@ -0,0 +1,182 @@
package client
import (
"context"
"crypto/ecdsa"
"fmt"
"sync"
"time"
"github.com/go-resty/resty/v2"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/prover/config"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
)
// CoordinatorClient is a client used for interacting with the Coordinator service.
// It wraps a resty HTTP client which, after a successful Login, carries the JWT
// auth token for subsequent authenticated requests.
type CoordinatorClient struct {
	client *resty.Client // pre-configured HTTP client; Login stores the JWT on it via SetAuthToken

	proverName string            // prover name placed in the signed login message
	priv       *ecdsa.PrivateKey // key used to sign the login challenge

	mu sync.Mutex // serializes Login calls (e.g. concurrent re-logins after JWT expiry)
}
// NewCoordinatorClient constructs a new CoordinatorClient.
// The returned client is configured with the base URL, request timeout and
// retry policy taken from cfg. Callers must invoke Login before issuing
// authenticated requests.
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
	httpClient := resty.New()
	httpClient.SetBaseURL(cfg.BaseURL)
	httpClient.SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second)
	httpClient.SetRetryCount(cfg.RetryCount)
	httpClient.SetRetryWaitTime(time.Duration(cfg.RetryWaitTimeSec) * time.Second)

	c := &CoordinatorClient{
		client:     httpClient,
		proverName: proverName,
		priv:       priv,
	}
	return c, nil
}
// Login completes the entire login process in one function call:
// it fetches a challenge token from the coordinator, signs it with the
// prover's private key, exchanges the signed message for a JWT, and stores
// that JWT on the underlying HTTP client for subsequent requests.
//
// Login is safe for concurrent use; concurrent callers are serialized.
// The supplied ctx bounds both HTTP round trips.
func (c *CoordinatorClient) Login(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	var challengeResult ChallengeResponse
	// Get random challenge string.
	challengeResp, err := c.client.R().
		SetContext(ctx).
		SetHeader("Content-Type", "application/json").
		SetResult(&challengeResult).
		Get("/coordinator/v1/challenge")
	if err != nil {
		return fmt.Errorf("get random string failed: %v", err)
	}

	if challengeResp.StatusCode() != 200 {
		return fmt.Errorf("failed to get random string, status code: %v", challengeResp.StatusCode())
	}

	if challengeResult.ErrCode != types.Success {
		return fmt.Errorf("failed to get random string, error code: %v, error message: %v", challengeResult.ErrCode, challengeResult.ErrMsg)
	}

	// Data is a pointer and may be absent on a malformed response; guard
	// before dereferencing to avoid a panic.
	if challengeResult.Data == nil {
		return fmt.Errorf("failed to get random string, empty challenge data in response")
	}

	// Prepare and sign the login request.
	authMsg := &message.AuthMsg{
		Identity: &message.Identity{
			ProverVersion: version.Version,
			ProverName:    c.proverName,
			Challenge:     challengeResult.Data.Token,
		},
	}

	err = authMsg.SignWithKey(c.priv)
	if err != nil {
		return fmt.Errorf("signature failed: %v", err)
	}

	// Login to coordinator. The challenge token authorizes this single call.
	loginReq := &LoginRequest{
		Message: struct {
			Challenge     string `json:"challenge"`
			ProverName    string `json:"prover_name"`
			ProverVersion string `json:"prover_version"`
		}{
			Challenge:     authMsg.Identity.Challenge,
			ProverName:    authMsg.Identity.ProverName,
			ProverVersion: authMsg.Identity.ProverVersion,
		},
		Signature: authMsg.Signature,
	}

	var loginResult LoginResponse
	loginResp, err := c.client.R().
		SetContext(ctx).
		SetHeader("Content-Type", "application/json").
		SetHeader("Authorization", fmt.Sprintf("Bearer %s", challengeResult.Data.Token)).
		SetBody(loginReq).
		SetResult(&loginResult).
		Post("/coordinator/v1/login")
	if err != nil {
		return fmt.Errorf("login failed: %v", err)
	}

	if loginResp.StatusCode() != 200 {
		return fmt.Errorf("failed to login, status code: %v", loginResp.StatusCode())
	}

	if loginResult.ErrCode != types.Success {
		return fmt.Errorf("failed to login, error code: %v, error message: %v", loginResult.ErrCode, loginResult.ErrMsg)
	}

	if loginResult.Data == nil {
		return fmt.Errorf("failed to login, empty token data in response")
	}

	// Store JWT token for future requests.
	c.client.SetAuthToken(loginResult.Data.Token)

	return nil
}
// GetTask sends a request to the coordinator to get prover task.
//
// If the coordinator reports an expired JWT, GetTask re-logins once and
// retries the request; a second expiry is returned as an error rather than
// retried, so a misbehaving coordinator cannot drive unbounded recursion.
func (c *CoordinatorClient) GetTask(ctx context.Context, req *GetTaskRequest) (*GetTaskResponse, error) {
	return c.getTask(ctx, req, true)
}

// getTask performs one get_task round trip. When allowRelogin is true and the
// coordinator reports an expired JWT, it re-logins and retries exactly once.
func (c *CoordinatorClient) getTask(ctx context.Context, req *GetTaskRequest, allowRelogin bool) (*GetTaskResponse, error) {
	var result GetTaskResponse

	resp, err := c.client.R().
		SetContext(ctx).
		SetHeader("Content-Type", "application/json").
		SetBody(req).
		SetResult(&result).
		Post("/coordinator/v1/get_task")
	if err != nil {
		return nil, fmt.Errorf("request for GetTask failed: %v", err)
	}

	if resp.StatusCode() != 200 {
		return nil, fmt.Errorf("failed to get task, status code: %v", resp.StatusCode())
	}

	if result.ErrCode == types.ErrJWTTokenExpired {
		if !allowRelogin {
			return nil, fmt.Errorf("JWT expired again immediately after re-login")
		}
		log.Debug("JWT expired, attempting to re-login")
		if err := c.Login(ctx); err != nil {
			return nil, fmt.Errorf("JWT expired, re-login failed: %v", err)
		}
		return c.getTask(ctx, req, false)
	}

	if result.ErrCode != types.Success {
		return nil, fmt.Errorf("error code: %v, error message: %v", result.ErrCode, result.ErrMsg)
	}

	// Data is a pointer; callers dereference it, so reject a Success
	// response that carries no task payload.
	if result.Data == nil {
		return nil, fmt.Errorf("empty task data in coordinator response")
	}

	return &result, nil
}
// SubmitProof sends a request to the coordinator to submit proof.
//
// If the coordinator reports an expired JWT, SubmitProof re-logins once and
// retries the request; a second expiry is returned as an error rather than
// retried, so a misbehaving coordinator cannot drive unbounded recursion.
func (c *CoordinatorClient) SubmitProof(ctx context.Context, req *SubmitProofRequest) error {
	return c.submitProof(ctx, req, true)
}

// submitProof performs one submit_proof round trip. When allowRelogin is true
// and the coordinator reports an expired JWT, it re-logins and retries exactly once.
func (c *CoordinatorClient) submitProof(ctx context.Context, req *SubmitProofRequest, allowRelogin bool) error {
	var result SubmitProofResponse

	resp, err := c.client.R().
		SetContext(ctx).
		SetHeader("Content-Type", "application/json").
		SetBody(req).
		SetResult(&result).
		Post("/coordinator/v1/submit_proof")
	if err != nil {
		return fmt.Errorf("submit proof request failed: %v", err)
	}

	if resp.StatusCode() != 200 {
		return fmt.Errorf("failed to submit proof, status code: %v", resp.StatusCode())
	}

	if result.ErrCode == types.ErrJWTTokenExpired {
		if !allowRelogin {
			return fmt.Errorf("JWT expired again immediately after re-login")
		}
		log.Debug("JWT expired, attempting to re-login")
		if err := c.Login(ctx); err != nil {
			return fmt.Errorf("JWT expired, re-login failed: %v", err)
		}
		return c.submitProof(ctx, req, false)
	}

	if result.ErrCode != types.Success {
		return fmt.Errorf("error code: %v, error message: %v", result.ErrCode, result.ErrMsg)
	}

	return nil
}

66
prover/client/types.go Normal file
View File

@@ -0,0 +1,66 @@
package client
import (
"scroll-tech/common/types/message"
)
// ChallengeResponse defines the response structure for random API.
// A non-zero ErrCode indicates failure; Data is only populated on success.
type ChallengeResponse struct {
	ErrCode int    `json:"errcode"` // types.Success (0) on success, error number otherwise
	ErrMsg  string `json:"errmsg"`  // human-readable error description
	Data    *struct {
		Time  string `json:"time"`  // token issue time reported by the coordinator
		Token string `json:"token"` // challenge token to be signed and echoed back on login
	} `json:"data,omitempty"` // nil when the coordinator returns an error
}
// LoginRequest defines the request structure for login API.
// Message carries the prover identity fields covered by Signature.
type LoginRequest struct {
	Message struct {
		Challenge     string `json:"challenge"`      // challenge token obtained from the challenge endpoint
		ProverName    string `json:"prover_name"`    // self-reported prover name
		ProverVersion string `json:"prover_version"` // prover software version
	} `json:"message"`
	Signature string `json:"signature"` // prover's signature over Message
}
// LoginResponse defines the response structure for login API.
// On success Data.Token holds the JWT used to authorize later requests.
type LoginResponse struct {
	ErrCode int    `json:"errcode"` // types.Success (0) on success, error number otherwise
	ErrMsg  string `json:"errmsg"`  // human-readable error description
	Data    *struct {
		Time  string `json:"time"`  // token issue time reported by the coordinator
		Token string `json:"token"` // JWT bearer token for authenticated requests
	} `json:"data"` // pointer: may be nil on error responses
}
// GetTaskRequest defines the request structure for GetTask API.
type GetTaskRequest struct {
	ProverHeight uint64            `json:"prover_height"` // latest confirmed L2 block height seen by this prover
	TaskType     message.ProofType `json:"task_type"`     // kind of proof task requested (chunk or batch)
}
// GetTaskResponse defines the response structure for GetTask API.
type GetTaskResponse struct {
	ErrCode int    `json:"errcode"` // types.Success (0) on success, error number otherwise
	ErrMsg  string `json:"errmsg"`  // human-readable error description
	Data    *struct {
		TaskID   string `json:"task_id"`   // unique identifier of the assigned task
		TaskType int    `json:"task_type"` // numeric message.ProofType of the task
		TaskData string `json:"task_data"` // JSON-encoded task detail, decoded per TaskType
	} `json:"data"` // pointer: may be nil on error responses
}
// SubmitProofRequest defines the request structure for the SubmitProof API.
type SubmitProofRequest struct {
	TaskID   string `json:"task_id"`   // identifier of the task this proof answers
	TaskType int    `json:"task_type"` // numeric message.ProofType of the proof
	Status   int    `json:"status"`    // numeric proving status (ok / error)
	Proof    string `json:"proof"`     // JSON-encoded proof payload; may be empty on failure status
}
// SubmitProofResponse defines the response structure for the SubmitProof API.
type SubmitProofResponse struct {
	ErrCode int    `json:"errcode"` // types.Success (0) on success, error number otherwise
	ErrMsg  string `json:"errmsg"`  // human-readable error description
}

View File

@@ -1,6 +1,7 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
@@ -42,7 +43,7 @@ func action(ctx *cli.Context) error {
}
// Create prover
r, err := prover.NewProver(cfg)
r, err := prover.NewProver(context.Background(), cfg)
if err != nil {
return err
}
@@ -50,7 +51,9 @@ func action(ctx *cli.Context) error {
r.Start()
defer r.Stop()
log.Info("prover start successfully", "name", cfg.ProverName, "publickey", r.PublicKey(), "version", version.Version)
log.Info("prover start successfully",
"name", cfg.ProverName, "type", cfg.Core.ProofType,
"publickey", r.PublicKey(), "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

View File

@@ -8,12 +8,15 @@ import (
"testing"
"time"
"github.com/google/uuid"
"github.com/scroll-tech/go-ethereum/rpc"
"golang.org/x/sync/errgroup"
proverConfig "scroll-tech/prover/config"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)
@@ -43,18 +46,20 @@ type ProverApp struct {
}
// NewProverApp return a new proverApp manager.
func NewProverApp(base *docker.App, file string, wsURL string) *ProverApp {
proverFile := fmt.Sprintf("/tmp/%d_prover-config.json", base.Timestamp)
func NewProverApp(base *docker.App, file string, httpURL string, proofType message.ProofType) *ProverApp {
uuid := uuid.New().String()
proverFile := fmt.Sprintf("/tmp/%s_%d_prover-config.json", uuid, base.Timestamp)
proverApp := &ProverApp{
base: base,
originFile: file,
proverFile: proverFile,
bboltDB: fmt.Sprintf("/tmp/%d_bbolt_db", base.Timestamp),
bboltDB: fmt.Sprintf("/tmp/%s_%d_bbolt_db", uuid, base.Timestamp),
index: getIndex(),
name: string(utils.ProverApp),
args: []string{"--log.debug", "--config", proverFile},
}
if err := proverApp.MockConfig(true, wsURL); err != nil {
if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
panic(err)
}
return proverApp
@@ -66,6 +71,13 @@ func (r *ProverApp) RunApp(t *testing.T, args ...string) {
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, "prover start successfully") })
}
// RunAppWithExpectedResult runs the prover-test child process with multiple parameters,
// and checks for a specific expected result in the output.
func (r *ProverApp) RunAppWithExpectedResult(t *testing.T, expectedResult string, args ...string) {
r.AppAPI = cmd.NewCmd(r.name, append(r.args, args...)...)
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, expectedResult) })
}
// Free stop and release prover-test.
func (r *ProverApp) Free() {
if !utils.IsNil(r.AppAPI) {
@@ -77,14 +89,15 @@ func (r *ProverApp) Free() {
}
// MockConfig creates a new prover config.
func (r *ProverApp) MockConfig(store bool, wsURL string) error {
func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.ProofType) error {
cfg, err := proverConfig.NewConfig(r.originFile)
if err != nil {
return err
}
cfg.ProverName = fmt.Sprintf("%s_%d", r.name, r.index)
cfg.KeystorePath = fmt.Sprintf("/tmp/%d_%s.json", r.base.Timestamp, cfg.ProverName)
cfg.TraceEndpoint = r.base.L2gethImg.Endpoint()
cfg.L2Geth.Endpoint = r.base.L2gethImg.Endpoint()
cfg.L2Geth.Confirmations = rpc.LatestBlockNumber
// Reuse l1geth's keystore file
cfg.KeystorePassword = "scrolltest"
cfg.DBPath = r.bboltDB
@@ -93,7 +106,11 @@ func (r *ProverApp) MockConfig(store bool, wsURL string) error {
if err != nil {
return err
}
cfg.CoordinatorURL = wsURL
cfg.Coordinator.BaseURL = httpURL
cfg.Coordinator.RetryCount = 10
cfg.Coordinator.RetryWaitTimeSec = 10
cfg.Coordinator.ConnectionTimeoutSec = 30
cfg.Core.ProofType = proofType
r.Config = cfg
if !store {
@@ -123,18 +140,6 @@ func (r ProverApps) RunApps(t *testing.T, args ...string) {
_ = eg.Wait()
}
// MockConfigs creates all the proverApps' configs.
func (r ProverApps) MockConfigs(store bool, wsURL string) error {
var eg errgroup.Group
for _, prover := range r {
prover := prover
eg.Go(func() error {
return prover.MockConfig(store, wsURL)
})
}
return eg.Wait()
}
// Free releases proverApps.
func (r ProverApps) Free() {
var wg sync.WaitGroup

View File

@@ -2,9 +2,18 @@
"prover_name": "my_prover",
"keystore_path": "keystore.json",
"keystore_password": "prover-pwd",
"coordinator_url": "ws://localhost:8391",
"db_path": "bbolt_db",
"core": {
"params_path": "params"
},
"coordinator": {
"base_url": "https://coordinator/v1",
"retry_count": 10,
"retry_wait_time_sec": 10,
"connection_timeout_sec": 30
},
"l2geth": {
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
"confirmations": "0x1"
}
}

View File

@@ -5,20 +5,21 @@ import (
"os"
"path/filepath"
"scroll-tech/common/types/message"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/types/message"
)
// Config loads prover configuration items.
type Config struct {
ProverName string `json:"prover_name"`
KeystorePath string `json:"keystore_path"`
KeystorePassword string `json:"keystore_password"`
CoordinatorURL string `json:"coordinator_url"`
TraceEndpoint string `json:"trace_endpoint"`
Core *ProverCoreConfig `json:"core"`
DBPath string `json:"db_path"`
ProverName string `json:"prover_name"`
KeystorePath string `json:"keystore_path"`
KeystorePassword string `json:"keystore_password"`
Core *ProverCoreConfig `json:"core"`
DBPath string `json:"db_path"`
Coordinator *CoordinatorConfig `json:"coordinator"`
L2Geth *L2GethConfig `json:"l2geth"`
}
// ProverCoreConfig load zk prover config.
@@ -28,6 +29,20 @@ type ProverCoreConfig struct {
DumpDir string `json:"dump_dir,omitempty"`
}
// CoordinatorConfig represents the configuration for the Coordinator client.
type CoordinatorConfig struct {
BaseURL string `json:"base_url"`
RetryCount int `json:"retry_count"`
RetryWaitTimeSec int `json:"retry_wait_time_sec"`
ConnectionTimeoutSec int `json:"connection_timeout_sec"`
}
// L2GethConfig represents the configuration for the l2geth client.
type L2GethConfig struct {
Endpoint string `json:"endpoint"`
Confirmations rpc.BlockNumber `json:"confirmations"`
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
buf, err := os.ReadFile(filepath.Clean(file))

View File

@@ -3,6 +3,8 @@ module scroll-tech/prover
go 1.19
require (
github.com/go-resty/resty/v2 v2.7.0
github.com/google/uuid v1.3.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
@@ -19,7 +21,6 @@ require (
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/huin/goupnp v1.0.3 // indirect
@@ -43,6 +44,7 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect

View File

@@ -25,6 +25,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -119,6 +121,9 @@ golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
@@ -127,15 +132,20 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@@ -3,29 +3,27 @@ package prover
import (
"context"
"crypto/ecdsa"
"encoding/json"
"errors"
"fmt"
"sort"
"sync/atomic"
"time"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/client"
"scroll-tech/prover/client"
"scroll-tech/prover/config"
"scroll-tech/prover/core"
"scroll-tech/prover/store"
putils "scroll-tech/prover/utils"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)
var (
@@ -35,23 +33,21 @@ var (
// Prover contains websocket conn to coordinator, and task stack.
type Prover struct {
cfg *config.Config
client *client.Client
traceClient *ethclient.Client
stack *store.Stack
proverCore *core.ProverCore
taskChan chan *message.TaskMsg
sub ethereum.Subscription
ctx context.Context
cfg *config.Config
coordinatorClient *client.CoordinatorClient
l2GethClient *ethclient.Client
stack *store.Stack
proverCore *core.ProverCore
isDisconnected int64
isClosed int64
stopChan chan struct{}
isClosed int64
stopChan chan struct{}
priv *ecdsa.PrivateKey
}
// NewProver new a Prover object.
func NewProver(cfg *config.Config) (*Prover, error) {
func NewProver(ctx context.Context, cfg *config.Config) (*Prover, error) {
// load or create wallet
priv, err := utils.LoadOrCreateKey(cfg.KeystorePath, cfg.KeystorePassword)
if err != nil {
@@ -65,7 +61,7 @@ func NewProver(cfg *config.Config) (*Prover, error) {
}
// Collect geth node.
traceClient, err := ethclient.Dial(cfg.TraceEndpoint)
l2GethClient, err := ethclient.DialContext(ctx, cfg.L2Geth.Endpoint)
if err != nil {
return nil, err
}
@@ -78,21 +74,20 @@ func NewProver(cfg *config.Config) (*Prover, error) {
}
log.Info("init prover_core successfully!")
rClient, err := client.Dial(cfg.CoordinatorURL)
coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, priv)
if err != nil {
return nil, err
}
return &Prover{
cfg: cfg,
client: rClient,
traceClient: traceClient,
stack: stackDb,
proverCore: newProverCore,
sub: nil,
taskChan: make(chan *message.TaskMsg, 10),
stopChan: make(chan struct{}),
priv: priv,
ctx: ctx,
cfg: cfg,
coordinatorClient: coordinatorClient,
l2GethClient: l2GethClient,
stack: stackDb,
proverCore: newProverCore,
stopChan: make(chan struct{}),
priv: priv,
}, nil
}
@@ -108,85 +103,15 @@ func (r *Prover) PublicKey() string {
// Start runs Prover.
func (r *Prover) Start() {
log.Info("start to register to coordinator")
if err := r.Register(); err != nil {
log.Crit("register to coordinator failed", "error", err)
log.Info("start to login to coordinator")
if err := r.coordinatorClient.Login(r.ctx); err != nil {
log.Crit("login to coordinator failed", "error", err)
}
log.Info("register to coordinator successfully!")
log.Info("login to coordinator successfully!")
go r.HandleCoordinator()
go r.ProveLoop()
}
// Register registers Prover to the coordinator through Websocket.
func (r *Prover) Register() error {
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.cfg.ProverName,
ProverType: r.Type(),
Version: version.Version,
},
}
// Sign request token message
if err := authMsg.SignWithKey(r.priv); err != nil {
return fmt.Errorf("sign request token message failed %v", err)
}
token, err := r.client.RequestToken(context.Background(), authMsg)
if err != nil {
return fmt.Errorf("request token failed %v", err)
}
authMsg.Identity.Token = token
// Sign auth message
if err = authMsg.SignWithKey(r.priv); err != nil {
return fmt.Errorf("sign auth message failed %v", err)
}
sub, err := r.client.RegisterAndSubscribe(context.Background(), r.taskChan, authMsg)
r.sub = sub
return err
}
// HandleCoordinator accepts block-traces from coordinator through the Websocket and store it into Stack.
func (r *Prover) HandleCoordinator() {
for {
select {
case <-r.stopChan:
return
case task := <-r.taskChan:
log.Info("Accept BlockTrace from Scroll", "ID", task.ID)
err := r.stack.Push(&store.ProvingTask{Task: task, Times: 0})
if err != nil {
panic(fmt.Sprintf("could not push task(%s) into stack: %v", task.ID, err))
}
case err := <-r.sub.Err():
r.sub.Unsubscribe()
log.Error("Subscribe task with scroll failed", "error", err)
if atomic.LoadInt64(&r.isClosed) == 0 {
r.mustRetryCoordinator()
}
}
}
}
func (r *Prover) mustRetryCoordinator() {
atomic.StoreInt64(&r.isDisconnected, 1)
defer atomic.StoreInt64(&r.isDisconnected, 0)
for {
log.Info("retry to connect to coordinator...")
err := r.Register()
if err != nil {
log.Error("register to coordinator failed", "error", err)
time.Sleep(retryWait)
} else {
log.Info("re-register to coordinator successfully!")
break
}
}
}
// ProveLoop keep popping the block-traces from Stack and sends it to rust-prover for loop.
func (r *Prover) ProveLoop() {
for {
@@ -195,12 +120,7 @@ func (r *Prover) ProveLoop() {
return
default:
if err := r.proveAndSubmit(); err != nil {
if errors.Is(err, store.ErrEmpty) {
log.Debug("get empty trace", "error", err)
time.Sleep(time.Second * 3)
continue
}
log.Error("prove failed", "error", err)
log.Error("proveAndSubmit", "prover type", r.cfg.Core.ProofType, "error", err)
}
}
}
@@ -209,14 +129,27 @@ func (r *Prover) ProveLoop() {
func (r *Prover) proveAndSubmit() error {
task, err := r.stack.Peek()
if err != nil {
return err
if !errors.Is(err, store.ErrEmpty) {
return fmt.Errorf("failed to peek from stack: %v", err)
}
// fetch new proving task.
task, err = r.fetchTaskFromCoordinator()
if err != nil {
time.Sleep(retryWait)
return fmt.Errorf("failed to fetch task from coordinator: %v", err)
}
// Push the new task into the stack
if err = r.stack.Push(task); err != nil {
return fmt.Errorf("failed to push task into stack: %v", err)
}
}
var proofMsg *message.ProofDetail
if task.Times <= 2 {
// If panic times <= 2, try to proof the task.
if err = r.stack.UpdateTimes(task, task.Times+1); err != nil {
return err
return fmt.Errorf("failed to update times on stack: %v", err)
}
log.Info("start to prove task", "task-type", task.Task.Type, "task-id", task.Task.ID)
@@ -239,8 +172,70 @@ func (r *Prover) proveAndSubmit() error {
}
}()
r.signAndSubmitProof(proofMsg)
return nil
return r.submitProof(proofMsg)
}
// fetchTaskFromCoordinator fetches a new task from the server
func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
// get the latest confirmed block number
latestBlockNumber, err := putils.GetLatestConfirmedBlockNumber(r.ctx, r.l2GethClient, r.cfg.L2Geth.Confirmations)
if err != nil {
return nil, fmt.Errorf("failed to fetch latest confirmed block number: %v", err)
}
if latestBlockNumber == 0 {
return nil, fmt.Errorf("omit to prove task of the genesis block, latestBlockNumber: %v", latestBlockNumber)
}
// prepare the request
req := &client.GetTaskRequest{
ProverHeight: latestBlockNumber,
TaskType: r.Type(),
}
// send the request
resp, err := r.coordinatorClient.GetTask(r.ctx, req)
if err != nil {
return nil, fmt.Errorf("failed to get task, req: %v, err: %v", req, err)
}
// create a new TaskMsg
taskMsg := message.TaskMsg{
ID: resp.Data.TaskID,
Type: message.ProofType(resp.Data.TaskType),
}
// depending on the task type, unmarshal the task data into the appropriate field
switch taskMsg.Type {
case message.ProofTypeBatch:
taskMsg.BatchTaskDetail = &message.BatchTaskDetail{}
if err = json.Unmarshal([]byte(resp.Data.TaskData), taskMsg.BatchTaskDetail); err != nil {
return nil, fmt.Errorf("failed to unmarshal batch task detail: %v", err)
}
case message.ProofTypeChunk:
taskMsg.ChunkTaskDetail = &message.ChunkTaskDetail{}
if err = json.Unmarshal([]byte(resp.Data.TaskData), taskMsg.ChunkTaskDetail); err != nil {
return nil, fmt.Errorf("failed to unmarshal chunk task detail: %v", err)
}
default:
return nil, fmt.Errorf("unknown task type: %v", taskMsg.Type)
}
// convert the response task to a ProvingTask
provingTask := &store.ProvingTask{
Task: &taskMsg,
Times: 0,
}
// marshal the task to a json string for logging
taskJSON, err := json.Marshal(provingTask)
if err != nil {
return nil, fmt.Errorf("failed to marshal task to json: %v", err)
}
log.Info("successfully fetched new task from coordinator", "resp", resp, "task", string(taskJSON))
return provingTask, nil
}
func (r *Prover) prove(task *store.ProvingTask) (detail *message.ProofDetail) {
@@ -283,58 +278,85 @@ func (r *Prover) prove(task *store.ProvingTask) (detail *message.ProofDetail) {
func (r *Prover) proveChunk(task *store.ProvingTask) (*message.ChunkProof, error) {
if task.Task.ChunkTaskDetail == nil {
return nil, errors.New("ChunkTaskDetail is empty")
return nil, fmt.Errorf("ChunkTaskDetail is empty")
}
traces, err := r.getSortedTracesByHashes(task.Task.ChunkTaskDetail.BlockHashes)
if err != nil {
return nil, errors.New("get traces from eth node failed")
return nil, fmt.Errorf("get traces from eth node failed, block hashes: %v", task.Task.ChunkTaskDetail.BlockHashes)
}
return r.proverCore.ProveChunk(task.Task.ID, traces)
}
func (r *Prover) proveBatch(task *store.ProvingTask) (*message.BatchProof, error) {
if task.Task.BatchTaskDetail == nil {
return nil, errors.New("BatchTaskDetail is empty")
return nil, fmt.Errorf("BatchTaskDetail is empty")
}
return r.proverCore.ProveBatch(task.Task.ID, task.Task.BatchTaskDetail.ChunkInfos, task.Task.BatchTaskDetail.ChunkProofs)
}
func (r *Prover) signAndSubmitProof(msg *message.ProofDetail) {
authZkProof := &message.ProofMsg{ProofDetail: msg}
if err := authZkProof.Sign(r.priv); err != nil {
log.Error("sign proof error", "err", err)
return
func (r *Prover) submitProof(msg *message.ProofDetail) error {
// prepare the submit request
req := &client.SubmitProofRequest{
TaskID: msg.ID,
TaskType: int(msg.Type),
Status: int(msg.Status),
}
// Retry SubmitProof several times.
for i := 0; i < 3; i++ {
// When the prover is disconnected from the coordinator,
// wait until the prover reconnects to the coordinator.
for atomic.LoadInt64(&r.isDisconnected) == 1 {
time.Sleep(retryWait)
// marshal proof by tasktype
switch msg.Type {
case message.ProofTypeChunk:
if msg.ChunkProof != nil {
proofData, err := json.Marshal(msg.ChunkProof)
if err != nil {
return fmt.Errorf("error marshaling chunk proof: %v", err)
}
req.Proof = string(proofData)
}
serr := r.client.SubmitProof(context.Background(), authZkProof)
if serr == nil {
return
case message.ProofTypeBatch:
if msg.BatchProof != nil {
proofData, err := json.Marshal(msg.BatchProof)
if err != nil {
return fmt.Errorf("error marshaling batch proof: %v", err)
}
req.Proof = string(proofData)
}
log.Error("submit proof to coordinator error", "task ID", msg.ID, "error", serr)
}
// send the submit request
if err := r.coordinatorClient.SubmitProof(r.ctx, req); err != nil {
return fmt.Errorf("error submitting proof: %v", err)
}
log.Info("proof submitted successfully", "task-id", msg.ID, "task-type", msg.Type, "task-status", msg.Status, "err", msg.Error)
return nil
}
func (r *Prover) getSortedTracesByHashes(blockHashes []common.Hash) ([]*types.BlockTrace, error) {
if len(blockHashes) == 0 {
return nil, fmt.Errorf("blockHashes is empty")
}
var traces []*types.BlockTrace
for _, blockHash := range blockHashes {
trace, err := r.traceClient.GetBlockTraceByHash(context.Background(), blockHash)
trace, err := r.l2GethClient.GetBlockTraceByHash(r.ctx, blockHash)
if err != nil {
return nil, err
}
traces = append(traces, trace)
}
// Sort BlockTraces by header number.
// TODO: we should check that the number range here is continuous.
sort.Slice(traces, func(i, j int) bool {
return traces[i].Header.Number.Int64() < traces[j].Header.Number.Int64()
})
// Check that the block numbers are continuous
for i := 0; i < len(traces)-1; i++ {
if traces[i].Header.Number.Int64()+1 != traces[i+1].Header.Number.Int64() {
return nil, fmt.Errorf("block numbers are not continuous, got %v and %v",
traces[i].Header.Number.Int64(), traces[i+1].Header.Number.Int64())
}
}
return traces, nil
}
@@ -346,8 +368,6 @@ func (r *Prover) Stop() {
atomic.StoreInt64(&r.isClosed, 1)
close(r.stopChan)
// Close scroll's ws
r.sub.Unsubscribe()
// Close db
if err := r.stack.Close(); err != nil {
log.Error("failed to close bbolt db", "error", err)

View File

@@ -3,6 +3,7 @@ package store
import (
"encoding/json"
"errors"
"fmt"
"github.com/scroll-tech/go-ethereum/log"
"go.etcd.io/bbolt"
@@ -89,12 +90,12 @@ func (s *Stack) Delete(taskID string) error {
})
}
// UpdateTimes udpates the prover prove times of the proving task.
func (s *Stack) UpdateTimes(task *ProvingTask, udpateTimes int) error {
task.Times = udpateTimes
// UpdateTimes updates the prover prove times of the proving task.
func (s *Stack) UpdateTimes(task *ProvingTask, updateTimes int) error {
task.Times = updateTimes
byt, err := json.Marshal(task)
if err != nil {
return err
return fmt.Errorf("error marshaling task: %v", err)
}
key := []byte(task.Task.ID)
return s.Update(func(tx *bbolt.Tx) error {

56
prover/utils/utils.go Normal file
View File

@@ -0,0 +1,56 @@
package utils
import (
"context"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/rpc"
)
// ethClient is the minimal subset of an Ethereum RPC client needed by this
// package. Declared locally (consumer-side interface) so tests can supply
// lightweight mocks instead of a full client.
type ethClient interface {
	// BlockNumber returns the number of the most recent block.
	BlockNumber(ctx context.Context) (uint64, error)
	// HeaderByNumber returns the header for the given block number; negative
	// values are interpreted by the node as tags (latest/safe/finalized).
	HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
}
// GetLatestConfirmedBlockNumber returns the newest block number that satisfies
// the given confirmation requirement.
//
// Supported values of confirm:
//   - rpc.SafeBlockNumber / rpc.FinalizedBlockNumber: query the node for the
//     corresponding tagged header and return its number.
//   - rpc.LatestBlockNumber: return the current head block number.
//   - a non-negative integer N: treat N as a confirmation depth and return
//     head-N, or 0 if the chain is not yet N blocks long.
//
// Any other value yields an error.
func GetLatestConfirmedBlockNumber(ctx context.Context, client ethClient, confirm rpc.BlockNumber) (uint64, error) {
	switch {
	case confirm == rpc.SafeBlockNumber || confirm == rpc.FinalizedBlockNumber:
		// Negative rpc.BlockNumber values double as node-side tags, so the
		// confirm value itself is the tag to query.
		tag := big.NewInt(int64(confirm))
		header, err := client.HeaderByNumber(ctx, tag)
		if err != nil {
			return 0, fmt.Errorf("client.HeaderByNumber failed: tag %v, err %w", tag, err)
		}
		if !header.Number.IsInt64() {
			return 0, fmt.Errorf("received invalid block confirm: %v", header.Number)
		}
		return header.Number.Uint64(), nil
	case confirm == rpc.LatestBlockNumber:
		number, err := client.BlockNumber(ctx)
		if err != nil {
			return 0, fmt.Errorf("client.BlockNumber failed: %w", err)
		}
		return number, nil
	case confirm.Int64() >= 0: // A non-negative value is a confirmation depth.
		number, err := client.BlockNumber(ctx)
		if err != nil {
			return 0, fmt.Errorf("client.BlockNumber failed: %w", err)
		}
		cfmNum := uint64(confirm.Int64())
		if number >= cfmNum {
			return number - cfmNum, nil
		}
		// The chain is shorter than the confirmation depth; nothing is
		// confirmed yet.
		return 0, nil
	default:
		return 0, fmt.Errorf("unknown confirmation type: %v", confirm)
	}
}

View File

@@ -5,6 +5,7 @@ go 1.19
require (
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
github.com/stretchr/testify v1.8.3
gorm.io/gorm v1.25.2
)
require (
@@ -19,6 +20,8 @@ require (
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect

View File

@@ -39,6 +39,8 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4=
github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@@ -104,3 +106,4 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=

View File

@@ -1,87 +1,131 @@
package integration_test
import (
"crypto/rand"
"io/ioutil"
"context"
"log"
"math/big"
"net/http"
"strconv"
"strings"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
bcmd "scroll-tech/bridge/cmd"
"scroll-tech/common/docker"
"scroll-tech/integration-test/orm"
rapp "scroll-tech/prover/cmd/app"
"scroll-tech/database/migrate"
capp "scroll-tech/coordinator/cmd/app"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
bcmd "scroll-tech/bridge/cmd"
)
var (
base *docker.App
bridgeApp *bcmd.MockApp
coordinatorApp *capp.CoordinatorApp
proverApp *rapp.ProverApp
chunkProverApp *rapp.ProverApp
batchProverApp *rapp.ProverApp
)
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
bridgeApp = bcmd.NewBridgeApp(base, "../../bridge/conf/config.json")
coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json")
proverApp = rapp.NewProverApp(base, "../../prover/config.json", coordinatorApp.WSEndpoint())
chunkProverApp = rapp.NewProverApp(base, "../../prover/config.json", coordinatorApp.HTTPEndpoint(), message.ProofTypeChunk)
batchProverApp = rapp.NewProverApp(base, "../../prover/config.json", coordinatorApp.HTTPEndpoint(), message.ProofTypeBatch)
m.Run()
bridgeApp.Free()
coordinatorApp.Free()
proverApp.Free()
chunkProverApp.Free()
batchProverApp.Free()
base.Free()
}
func TestStartProcess(t *testing.T) {
// Start l1geth l2geth and postgres docker containers.
base.RunImages(t)
// Reset db.
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
func TestCoordinatorProverInteraction(t *testing.T) {
// Start postgres docker containers.
base.RunL2Geth(t)
base.RunDBImage(t)
// Init data
dbCfg := &database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
db, err := database.InitDB(dbCfg)
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
batchOrm := orm.NewBatch(db)
chunkOrm := orm.NewChunk(db)
l2BlockOrm := orm.NewL2Block(db)
// Connect to l2geth client
l2Client, err := base.L2Client()
if err != nil {
log.Fatalf("Failed to connect to the l2geth client: %v", err)
}
var header *gethTypes.Header
success := utils.TryTimes(10, func() bool {
header, err = l2Client.HeaderByNumber(context.Background(), big.NewInt(1))
if err != nil {
log.Printf("Failed to retrieve L2 genesis header: %v. Retrying...", err)
return false
}
return true
})
if !success {
log.Fatalf("Failed to retrieve L2 genesis header after multiple attempts: %v", err)
}
wrappedBlock := &types.WrappedBlock{
Header: header,
Transactions: nil,
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &types.Chunk{Blocks: []*types.WrappedBlock{wrappedBlock}}
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock})
assert.NoError(t, err)
dbChunk, err := chunkOrm.InsertChunk(context.Background(), chunk)
assert.NoError(t, err)
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 100, dbChunk.Hash)
assert.NoError(t, err)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk})
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
// Run coordinator app.
coordinatorApp.RunApp(t)
// Run prover app.
proverApp.RunApp(t)
chunkProverApp.RunAppWithExpectedResult(t, "proof submitted successfully") // chunk prover login -> get task -> submit proof.
batchProverApp.RunAppWithExpectedResult(t, "proof submitted successfully") // batch prover login -> get task -> submit proof.
// All task has been proven, coordinator would not return any task.
chunkProverApp.ExpectWithTimeout(t, false, 60*time.Second, "get empty prover task")
batchProverApp.ExpectWithTimeout(t, false, 60*time.Second, "get empty prover task")
// Free apps.
proverApp.WaitExit()
coordinatorApp.WaitExit()
}
func TestMonitorMetrics(t *testing.T) {
// Start l1geth l2geth and postgres docker containers.
base.RunImages(t)
// Reset db.
assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
// Start coordinator process with metrics server.
port, _ := rand.Int(rand.Reader, big.NewInt(2000))
svrPort := strconv.FormatInt(port.Int64()+52000, 10)
coordinatorApp.RunApp(t, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)
time.Sleep(time.Second)
// Get coordinator monitor metrics.
resp, err := http.Get("http://localhost:" + svrPort)
assert.NoError(t, err)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
assert.NoError(t, err)
bodyStr := string(body)
assert.Equal(t, 200, resp.StatusCode)
assert.Equal(t, true, strings.Contains(bodyStr, "coordinator_sessions_timeout_total"))
assert.Equal(t, true, strings.Contains(bodyStr, "coordinator_provers_disconnects_total"))
// Exit.
chunkProverApp.WaitExit()
batchProverApp.WaitExit()
coordinatorApp.WaitExit()
}

View File

@@ -0,0 +1,158 @@
package orm
import (
"context"
"errors"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// defaultBatchHeaderVersion is the header version used when there is no parent
// batch to inherit a version from.
const defaultBatchHeaderVersion = 0

// Batch represents a batch of chunks.
type Batch struct {
	db *gorm.DB `gorm:"column:-"` // gorm handle; excluded from the table schema

	// batch identity and boundaries
	Index           uint64 `json:"index" gorm:"column:index"`
	Hash            string `json:"hash" gorm:"column:hash"`
	StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
	StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
	EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
	EndChunkHash    string `json:"end_chunk_hash" gorm:"column:end_chunk_hash"`
	StateRoot       string `json:"state_root" gorm:"column:state_root"`
	WithdrawRoot    string `json:"withdraw_root" gorm:"column:withdraw_root"`
	ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"`
	BatchHeader     []byte `json:"batch_header" gorm:"column:batch_header"`

	// proving lifecycle (status defaults of 1 correspond to the *Pending /
	// *Unassigned enum values used by InsertBatch)
	ChunkProofsStatus int16      `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"`
	ProvingStatus     int16      `json:"proving_status" gorm:"column:proving_status;default:1"`
	Proof             []byte     `json:"proof" gorm:"column:proof;default:NULL"`
	ProverAssignedAt  *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
	ProvedAt          *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
	ProofTimeSec      int32      `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`

	// rollup lifecycle (L1 commit / finalize tracking)
	RollupStatus   int16      `json:"rollup_status" gorm:"column:rollup_status;default:1"`
	CommitTxHash   string     `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"`
	CommittedAt    *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"`
	FinalizeTxHash string     `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
	FinalizedAt    *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`

	// gas oracle
	OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

	// metadata (DeletedAt enables gorm soft deletes)
	CreatedAt time.Time      `json:"created_at" gorm:"column:created_at"`
	UpdatedAt time.Time      `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewBatch creates a new Batch database instance.
func NewBatch(db *gorm.DB) *Batch {
	batchOrm := Batch{db: db}
	return &batchOrm
}
// TableName returns the table name for the Batch model.
func (*Batch) TableName() string {
	const batchTableName = "batch"
	return batchTableName
}
// GetLatestBatch retrieves the batch with the highest index from the database.
// Returns a wrapped gorm.ErrRecordNotFound when the table is empty.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
	var latestBatch Batch
	query := o.db.WithContext(ctx).Model(&Batch{}).Order("index desc")
	if err := query.First(&latestBatch).Error; err != nil {
		return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
	}
	return &latestBatch, nil
}
// InsertBatch inserts a new batch into the database, deriving its index,
// parent hash, header version and total-L1-messages counter from the latest
// stored batch (if any). An optional dbTX lets the caller run the insert
// inside an existing transaction.
// for init data
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
	if len(chunks) == 0 {
		return nil, errors.New("invalid args")
	}

	parentBatch, err := o.GetLatestBatch(ctx)
	// errors.Is walks the %w chain itself; the previous errors.Unwrap(err)
	// was redundant and would mask the check for an unwrapped error.
	if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
		log.Error("failed to get the latest batch", "err", err)
		return nil, err
	}

	var batchIndex uint64
	var parentBatchHash common.Hash
	var totalL1MessagePoppedBefore uint64
	var version uint8 = defaultBatchHeaderVersion

	// If parentBatch==nil then err==gorm.ErrRecordNotFound, meaning there is
	// no batch record in the db yet and the zero-value defaults above apply;
	// otherwise fill in the parent-batch-derived fields.
	if parentBatch != nil {
		batchIndex = parentBatch.Index + 1
		parentBatchHash = common.HexToHash(parentBatch.Hash)

		var parentBatchHeader *types.BatchHeader
		parentBatchHeader, err = types.DecodeBatchHeader(parentBatch.BatchHeader)
		if err != nil {
			log.Error("failed to decode parent batch header", "index", parentBatch.Index, "hash", parentBatch.Hash, "err", err)
			return nil, err
		}

		totalL1MessagePoppedBefore = parentBatchHeader.TotalL1MessagePopped()
		version = parentBatchHeader.Version()
	}

	batchHeader, err := types.NewBatchHeader(version, batchIndex, totalL1MessagePoppedBefore, parentBatchHash, chunks)
	if err != nil {
		log.Error("failed to create batch header",
			"index", batchIndex, "total l1 message popped before", totalL1MessagePoppedBefore,
			"parent hash", parentBatchHash, "number of chunks", len(chunks), "err", err)
		return nil, err
	}

	numChunks := len(chunks)
	lastChunkBlockNum := len(chunks[numChunks-1].Blocks)

	// State and withdraw roots come from the last block of the last chunk.
	newBatch := Batch{
		Index:             batchIndex,
		Hash:              batchHeader.Hash().Hex(),
		StartChunkHash:    startChunkHash,
		StartChunkIndex:   startChunkIndex,
		EndChunkHash:      endChunkHash,
		EndChunkIndex:     endChunkIndex,
		StateRoot:         chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
		WithdrawRoot:      chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawRoot.Hex(),
		ParentBatchHash:   parentBatchHash.Hex(),
		BatchHeader:       batchHeader.Encode(),
		ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
		ProvingStatus:     int16(types.ProvingTaskUnassigned),
		RollupStatus:      int16(types.RollupPending),
		OracleStatus:      int16(types.GasOraclePending),
	}

	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	// Bug fix: WithContext returns a new session, so the result must be
	// assigned back — the original discarded it and silently dropped ctx.
	db = db.WithContext(ctx)
	db = db.Model(&Batch{})
	if err := db.Create(&newBatch).Error; err != nil {
		log.Error("failed to insert batch", "batch", newBatch, "err", err)
		return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
	}
	return &newBatch, nil
}

View File

@@ -0,0 +1,169 @@
package orm
import (
"context"
"errors"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// Chunk represents a chunk of blocks in the database.
type Chunk struct {
	db *gorm.DB `gorm:"-"` // gorm handle; excluded from the table schema

	// chunk identity and block range
	Index                        uint64 `json:"index" gorm:"column:index"`
	Hash                         string `json:"hash" gorm:"column:hash"`
	StartBlockNumber             uint64 `json:"start_block_number" gorm:"column:start_block_number"`
	StartBlockHash               string `json:"start_block_hash" gorm:"column:start_block_hash"`
	EndBlockNumber               uint64 `json:"end_block_number" gorm:"column:end_block_number"`
	EndBlockHash                 string `json:"end_block_hash" gorm:"column:end_block_hash"`
	StartBlockTime               uint64 `json:"start_block_time" gorm:"column:start_block_time"`
	TotalL1MessagesPoppedBefore  uint64 `json:"total_l1_messages_popped_before" gorm:"column:total_l1_messages_popped_before"`
	TotalL1MessagesPoppedInChunk uint32 `json:"total_l1_messages_popped_in_chunk" gorm:"column:total_l1_messages_popped_in_chunk"`
	ParentChunkHash              string `json:"parent_chunk_hash" gorm:"column:parent_chunk_hash"`
	StateRoot                    string `json:"state_root" gorm:"column:state_root"`
	ParentChunkStateRoot         string `json:"parent_chunk_state_root" gorm:"column:parent_chunk_state_root"`
	WithdrawRoot                 string `json:"withdraw_root" gorm:"column:withdraw_root"`

	// proving lifecycle (status default of 1 corresponds to ProvingTaskUnassigned)
	ProvingStatus    int16      `json:"proving_status" gorm:"column:proving_status;default:1"`
	Proof            []byte     `json:"proof" gorm:"column:proof;default:NULL"`
	ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
	ProvedAt         *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
	ProofTimeSec     int32      `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"`

	// batch containing this chunk; NULL until assigned via UpdateBatchHashInRange
	BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

	// aggregated metadata computed over the chunk's blocks at insert time
	TotalL2TxGas              uint64         `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
	TotalL2TxNum              uint32         `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
	TotalL1CommitCalldataSize uint32         `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
	TotalL1CommitGas          uint64         `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"`
	CreatedAt                 time.Time      `json:"created_at" gorm:"column:created_at"`
	UpdatedAt                 time.Time      `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt                 gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"` // enables gorm soft deletes
}
// NewChunk creates a new Chunk database instance.
func NewChunk(db *gorm.DB) *Chunk {
	chunkOrm := Chunk{db: db}
	return &chunkOrm
}
// TableName returns the table name for the chunk model.
func (*Chunk) TableName() string {
	const chunkTableName = "chunk"
	return chunkTableName
}
// GetLatestChunk retrieves the chunk with the highest index from the database.
// Returns a wrapped gorm.ErrRecordNotFound when the table is empty.
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
	var latestChunk Chunk
	query := o.db.WithContext(ctx).Model(&Chunk{}).Order("index desc")
	if err := query.First(&latestChunk).Error; err != nil {
		return nil, fmt.Errorf("Chunk.GetLatestChunk error: %w", err)
	}
	return &latestChunk, nil
}
// InsertChunk inserts a new chunk into the database, deriving its index and
// parent-chunk fields from the latest stored chunk (if any). An optional dbTX
// lets the caller run the insert inside an existing transaction.
// for init data
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
	if chunk == nil || len(chunk.Blocks) == 0 {
		return nil, errors.New("invalid args")
	}

	var chunkIndex uint64
	var totalL1MessagePoppedBefore uint64
	var parentChunkHash string
	var parentChunkStateRoot string

	parentChunk, err := o.GetLatestChunk(ctx)
	// errors.Is walks the %w chain itself; the previous errors.Unwrap(err)
	// was redundant and would mask the check for an unwrapped error.
	if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
		log.Error("failed to get latest chunk", "err", err)
		return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
	}

	// If parentChunk==nil then err==gorm.ErrRecordNotFound, meaning there is
	// no chunk record in the db yet and the zero-value defaults above apply;
	// otherwise fill in the parent-chunk-derived data.
	if parentChunk != nil {
		chunkIndex = parentChunk.Index + 1
		totalL1MessagePoppedBefore = parentChunk.TotalL1MessagesPoppedBefore + uint64(parentChunk.TotalL1MessagesPoppedInChunk)
		parentChunkHash = parentChunk.Hash
		parentChunkStateRoot = parentChunk.StateRoot
	}

	hash, err := chunk.Hash(totalL1MessagePoppedBefore)
	if err != nil {
		log.Error("failed to get chunk hash", "err", err)
		return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
	}

	// Aggregate per-block metadata for the chunk row.
	var totalL2TxGas uint64
	var totalL2TxNum uint64
	var totalL1CommitCalldataSize uint64
	var totalL1CommitGas uint64
	for _, block := range chunk.Blocks {
		totalL2TxGas += block.Header.GasUsed
		totalL2TxNum += block.L2TxsNum()
		totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
		totalL1CommitGas += block.EstimateL1CommitGas()
	}

	numBlocks := len(chunk.Blocks)
	newChunk := Chunk{
		Index:                        chunkIndex,
		Hash:                         hash.Hex(),
		StartBlockNumber:             chunk.Blocks[0].Header.Number.Uint64(),
		StartBlockHash:               chunk.Blocks[0].Header.Hash().Hex(),
		EndBlockNumber:               chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
		EndBlockHash:                 chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
		TotalL2TxGas:                 totalL2TxGas,
		TotalL2TxNum:                 uint32(totalL2TxNum),
		TotalL1CommitCalldataSize:    uint32(totalL1CommitCalldataSize),
		TotalL1CommitGas:             totalL1CommitGas,
		StartBlockTime:               chunk.Blocks[0].Header.Time,
		TotalL1MessagesPoppedBefore:  totalL1MessagePoppedBefore,
		TotalL1MessagesPoppedInChunk: uint32(chunk.NumL1Messages(totalL1MessagePoppedBefore)),
		ParentChunkHash:              parentChunkHash,
		StateRoot:                    chunk.Blocks[numBlocks-1].Header.Root.Hex(),
		ParentChunkStateRoot:         parentChunkStateRoot,
		WithdrawRoot:                 chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
		ProvingStatus:                int16(types.ProvingTaskUnassigned),
	}

	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Chunk{})
	if err := db.Create(&newChunk).Error; err != nil {
		return nil, fmt.Errorf("Chunk.InsertChunk error: %w, chunk hash: %v", err, newChunk.Hash)
	}
	return &newChunk, nil
}
// UpdateBatchHashInRange updates the batch_hash for chunks whose index lies in
// the closed interval [startIndex, endIndex] (both ends inclusive).
// for unit test
func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string) error {
	query := o.db.WithContext(ctx).
		Model(&Chunk{}).
		Where("index >= ? AND index <= ?", startIndex, endIndex)
	err := query.Update("batch_hash", batchHash).Error
	if err != nil {
		return fmt.Errorf("Chunk.UpdateBatchHashInRange error: %w, start index: %v, end index: %v, batch hash: %v", err, startIndex, endIndex, batchHash)
	}
	return nil
}

View File

@@ -0,0 +1,112 @@
package orm
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/types"
)
// L2Block represents a l2 block in the database.
//
// NOTE(review): unlike Batch/Chunk, most gorm tags here use bare names
// (e.g. `gorm:"number"`) instead of the `column:` key — confirm gorm parses
// these as the intended column names.
type L2Block struct {
	db *gorm.DB `gorm:"column:-"` // gorm handle; not a table column

	// block data; Header, Transactions and RowConsumption are stored as JSON strings
	Number         uint64 `json:"number" gorm:"number"`
	Hash           string `json:"hash" gorm:"hash"`
	ParentHash     string `json:"parent_hash" gorm:"parent_hash"`
	Header         string `json:"header" gorm:"header"`
	Transactions   string `json:"transactions" gorm:"transactions"`
	WithdrawRoot   string `json:"withdraw_root" gorm:"withdraw_root"`
	StateRoot      string `json:"state_root" gorm:"state_root"`
	TxNum          uint32 `json:"tx_num" gorm:"tx_num"`
	GasUsed        uint64 `json:"gas_used" gorm:"gas_used"`
	BlockTimestamp uint64 `json:"block_timestamp" gorm:"block_timestamp"`
	RowConsumption string `json:"row_consumption" gorm:"row_consumption"`

	// chunk containing this block; NULL until assigned via UpdateChunkHashInRange
	ChunkHash string `json:"chunk_hash" gorm:"chunk_hash;default:NULL"`

	// metadata (DeletedAt enables gorm soft deletes)
	CreatedAt time.Time      `json:"created_at" gorm:"column:created_at"`
	UpdatedAt time.Time      `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}
// NewL2Block creates a new L2Block instance.
func NewL2Block(db *gorm.DB) *L2Block {
	blockOrm := L2Block{db: db}
	return &blockOrm
}
// TableName returns the name of the "l2_block" table.
func (*L2Block) TableName() string {
	const l2BlockTableName = "l2_block"
	return l2BlockTableName
}
// InsertL2Blocks inserts l2 blocks into the "l2_block" table. Header,
// Transactions and RowConsumption are serialized to JSON strings; the
// remaining columns are copied from the block header.
// for unit test
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*types.WrappedBlock) error {
	// Fail fast on empty input: gorm's Create rejects empty slices, and the
	// sibling Batch/Chunk inserters validate their args the same way.
	if len(blocks) == 0 {
		return fmt.Errorf("L2Block.InsertL2Blocks error: no blocks to insert")
	}

	l2Blocks := make([]L2Block, 0, len(blocks))
	for _, block := range blocks {
		header, err := json.Marshal(block.Header)
		if err != nil {
			log.Error("failed to marshal block header", "hash", block.Header.Hash().String(), "err", err)
			return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
		}

		txs, err := json.Marshal(block.Transactions)
		if err != nil {
			log.Error("failed to marshal transactions", "hash", block.Header.Hash().String(), "err", err)
			return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
		}

		rc, err := json.Marshal(block.RowConsumption)
		if err != nil {
			log.Error("failed to marshal RowConsumption", "hash", block.Header.Hash().String(), "err", err)
			return fmt.Errorf("L2Block.InsertL2Blocks error: %w, block hash: %v", err, block.Header.Hash().String())
		}

		l2Block := L2Block{
			Number:         block.Header.Number.Uint64(),
			Hash:           block.Header.Hash().String(),
			ParentHash:     block.Header.ParentHash.String(),
			Transactions:   string(txs),
			WithdrawRoot:   block.WithdrawRoot.Hex(),
			StateRoot:      block.Header.Root.Hex(),
			TxNum:          uint32(len(block.Transactions)),
			GasUsed:        block.Header.GasUsed,
			BlockTimestamp: block.Header.Time,
			Header:         string(header),
			RowConsumption: string(rc),
		}
		l2Blocks = append(l2Blocks, l2Block)
	}

	db := o.db.WithContext(ctx)
	db = db.Model(&L2Block{})
	if err := db.Create(&l2Blocks).Error; err != nil {
		return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
	}
	return nil
}
// UpdateChunkHashInRange updates the chunk hash for l2 blocks whose number
// lies in the closed interval [startNumber, endNumber] (both ends inclusive).
// for unit test
func (o *L2Block) UpdateChunkHashInRange(ctx context.Context, startNumber uint64, endNumber uint64, chunkHash string) error {
	query := o.db.WithContext(ctx).
		Model(&L2Block{}).
		Where("number >= ? AND number <= ?", startNumber, endNumber)
	err := query.Update("chunk_hash", chunkHash).Error
	if err != nil {
		return fmt.Errorf("L2Block.UpdateChunkHashInRange error: %w, start number: %v, end number: %v, chunk hash: %v",
			err, startNumber, endNumber, chunkHash)
	}
	return nil
}