Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 07:28:08 -05:00

Compare commits (17 commits)
| SHA1 |
|---|
| bcd9764bcd |
| b4f8377a08 |
| b52d43caa8 |
| 201bf401cd |
| 898ac1d25c |
| 1336b89fb8 |
| 73045df037 |
| b3093e9eb6 |
| 3d5250e52d |
| b7324c76bc |
| 6d6e98bd6e |
| 9e35ce0ab4 |
| b86ebaefaf |
| 78a4298eda |
| 49d8387714 |
| af2913903b |
| f8a7d70872 |
@@ -23,6 +23,7 @@ type FetcherConfig struct {
	DAIGatewayAddr     string `json:"DAIGatewayAddr"`
	USDCGatewayAddr    string `json:"USDCGatewayAddr"`
	LIDOGatewayAddr    string `json:"LIDOGatewayAddr"`
+	PufferGatewayAddr  string `json:"PufferGatewayAddr"`
	ERC721GatewayAddr  string `json:"ERC721GatewayAddr"`
	ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
	ScrollChainAddr    string `json:"ScrollChainAddr"`
@@ -93,6 +93,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
	}

+	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
+	}
+
	log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)

	f := &L1FetcherLogic{
@@ -85,7 +85,12 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient

	if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
		addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
-		gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
	}

+	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
+	}
+
	log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
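Note the one-line fix inside the LIDO branch above: the old code appended the USDC gateway address to `gatewayList` instead of the LIDO gateway. A hypothetical helper, not part of this diff, shows how the repeated guard-and-append pattern could be factored so the two lists cannot drift apart:

```go
// appendGateway is an illustrative refactor, not code from this PR: it adds a
// configured gateway address to both lists only when the address is non-zero,
// so a copy-paste slip like the USDC/LIDO mix-up above cannot happen.
func appendGateway(addressList, gatewayList []common.Address, cfgAddr string) ([]common.Address, []common.Address) {
	addr := common.HexToAddress(cfgAddr)
	if addr != (common.Address{}) {
		addressList = append(addressList, addr)
		gatewayList = append(gatewayList, addr)
	}
	return addressList, gatewayList
}
```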
@@ -1,62 +1,27 @@
-package database
+package database_test

import (
-	"context"
-	"errors"
-	"io"
-	"os"
	"testing"
-	"time"
-
-	"github.com/mattn/go-colorable"
-	"github.com/mattn/go-isatty"
-	"github.com/scroll-tech/go-ethereum/log"
+
	"github.com/stretchr/testify/assert"

-	"scroll-tech/common/docker"
+	"scroll-tech/common/database"
+	"scroll-tech/common/testcontainers"
	"scroll-tech/common/version"
)

-func TestGormLogger(t *testing.T) {
-	output := io.Writer(os.Stderr)
-	usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
-	if usecolor {
-		output = colorable.NewColorableStderr()
-	}
-	ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
-	glogger := log.NewGlogHandler(ostream)
-	// Set log level
-	glogger.Verbosity(log.LvlTrace)
-	log.Root().SetHandler(glogger)
-
-	var gl gormLogger
-	gl.gethLogger = log.Root()
-
-	gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
-	gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
-	gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
-	gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
-}
-
func TestDB(t *testing.T) {
	version.Version = "v4.1.98-aaa-bbb-ccc"
-	base := docker.NewDockerApp()
-	base.RunDBImage(t)
-
-	dbCfg := &Config{
-		DSN:        base.DBConfig.DSN,
-		DriverName: base.DBConfig.DriverName,
-		MaxOpenNum: base.DBConfig.MaxOpenNum,
-		MaxIdleNum: base.DBConfig.MaxIdleNum,
-	}
+	testApps := testcontainers.NewTestcontainerApps()
+	assert.NoError(t, testApps.StartPostgresContainer())

	var err error
-	db, err := InitDB(dbCfg)
+	db, err := testApps.GetGormDBClient()
	assert.NoError(t, err)

-	sqlDB, err := Ping(db)
+	sqlDB, err := database.Ping(db)
	assert.NoError(t, err)
	assert.NotNil(t, sqlDB)

-	assert.NoError(t, CloseDB(db))
+	assert.NoError(t, database.CloseDB(db))
}
common/database/logger_test.go (new file, 35 lines)
@@ -0,0 +1,35 @@
+package database
+
+import (
+	"context"
+	"errors"
+	"io"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+	"github.com/scroll-tech/go-ethereum/log"
+)
+
+func TestGormLogger(t *testing.T) {
+	output := io.Writer(os.Stderr)
+	usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
+	if usecolor {
+		output = colorable.NewColorableStderr()
+	}
+	ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
+	glogger := log.NewGlogHandler(ostream)
+	// Set log level
+	glogger.Verbosity(log.LvlTrace)
+	log.Root().SetHandler(glogger)
+
+	var gl gormLogger
+	gl.gethLogger = log.Root()
+
+	gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
+	gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
+	gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
+	gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
+}
@@ -1,196 +0,0 @@
-package docker
-
-import (
-	"crypto/rand"
-	"database/sql"
-	"encoding/json"
-	"fmt"
-	"math/big"
-	"os"
-	"testing"
-	"time"
-
-	"github.com/jmoiron/sqlx"
-	"github.com/scroll-tech/go-ethereum/ethclient"
-	"github.com/stretchr/testify/assert"
-
-	"scroll-tech/database"
-
-	"scroll-tech/common/utils"
-)
-
-var (
-	l1StartPort = 10000
-	l2StartPort = 20000
-	dbStartPort = 30000
-)
-
-// AppAPI app interface.
-type AppAPI interface {
-	IsRunning() bool
-	WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
-	RunApp(waitResult func() bool)
-	WaitExit()
-	ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
-}
-
-// App is collection struct of runtime docker images
-type App struct {
-	L1gethImg GethImgInstance
-	L2gethImg GethImgInstance
-	DBImg     ImgInstance
-
-	dbClient     *sql.DB
-	DBConfig     *database.DBConfig
-	DBConfigFile string
-
-	// common time stamp.
-	Timestamp int
-}
-
-// NewDockerApp returns new instance of dockerApp struct
-func NewDockerApp() *App {
-	timestamp := time.Now().Nanosecond()
-	app := &App{
-		Timestamp:    timestamp,
-		L1gethImg:    newTestL1Docker(),
-		L2gethImg:    newTestL2Docker(),
-		DBImg:        newTestDBDocker("postgres"),
-		DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
-	}
-	if err := app.mockDBConfig(); err != nil {
-		panic(err)
-	}
-	return app
-}
-
-// RunImages runs all images togather
-func (b *App) RunImages(t *testing.T) {
-	b.RunDBImage(t)
-	b.RunL1Geth(t)
-	b.RunL2Geth(t)
-}
-
-// RunDBImage starts postgres docker container.
-func (b *App) RunDBImage(t *testing.T) {
-	if b.DBImg.IsRunning() {
-		return
-	}
-	assert.NoError(t, b.DBImg.Start())
-
-	// try 5 times until the db is ready.
-	ok := utils.TryTimes(10, func() bool {
-		db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
-		return err == nil && db != nil && db.Ping() == nil
-	})
-	assert.True(t, ok)
-}
-
-// Free clear all running images, double check and recycle docker container.
-func (b *App) Free() {
-	if b.L1gethImg.IsRunning() {
-		_ = b.L1gethImg.Stop()
-	}
-	if b.L2gethImg.IsRunning() {
-		_ = b.L2gethImg.Stop()
-	}
-	if b.DBImg.IsRunning() {
-		_ = b.DBImg.Stop()
-		_ = os.Remove(b.DBConfigFile)
-		if !utils.IsNil(b.dbClient) {
-			_ = b.dbClient.Close()
-			b.dbClient = nil
-		}
-	}
-}
-
-// RunL1Geth starts l1geth docker container.
-func (b *App) RunL1Geth(t *testing.T) {
-	if b.L1gethImg.IsRunning() {
-		return
-	}
-	assert.NoError(t, b.L1gethImg.Start())
-}
-
-// L1Client returns a ethclient by dialing running l1geth
-func (b *App) L1Client() (*ethclient.Client, error) {
-	if utils.IsNil(b.L1gethImg) {
-		return nil, fmt.Errorf("l1 geth is not running")
-	}
-	client, err := ethclient.Dial(b.L1gethImg.Endpoint())
-	if err != nil {
-		return nil, err
-	}
-	return client, nil
-}
-
-// RunL2Geth starts l2geth docker container.
-func (b *App) RunL2Geth(t *testing.T) {
-	if b.L2gethImg.IsRunning() {
-		return
-	}
-	assert.NoError(t, b.L2gethImg.Start())
-}
-
-// L2Client returns a ethclient by dialing running l2geth
-func (b *App) L2Client() (*ethclient.Client, error) {
-	if utils.IsNil(b.L2gethImg) {
-		return nil, fmt.Errorf("l2 geth is not running")
-	}
-	client, err := ethclient.Dial(b.L2gethImg.Endpoint())
-	if err != nil {
-		return nil, err
-	}
-	return client, nil
-}
-
-// DBClient create and return *sql.DB instance.
-func (b *App) DBClient(t *testing.T) *sql.DB {
-	if !utils.IsNil(b.dbClient) {
-		return b.dbClient
-	}
-	var (
-		cfg = b.DBConfig
-		err error
-	)
-	b.dbClient, err = sql.Open(cfg.DriverName, cfg.DSN)
-	assert.NoError(t, err)
-	b.dbClient.SetMaxOpenConns(cfg.MaxOpenNum)
-	b.dbClient.SetMaxIdleConns(cfg.MaxIdleNum)
-	assert.NoError(t, b.dbClient.Ping())
-	return b.dbClient
-}
-
-func (b *App) mockDBConfig() error {
-	b.DBConfig = &database.DBConfig{
-		DSN:        "",
-		DriverName: "postgres",
-		MaxOpenNum: 200,
-		MaxIdleNum: 20,
-	}
-
-	if b.DBImg != nil {
-		b.DBConfig.DSN = b.DBImg.Endpoint()
-	}
-	data, err := json.Marshal(b.DBConfig)
-	if err != nil {
-		return err
-	}
-
-	return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
-}
-
-func newTestL1Docker() GethImgInstance {
-	id, _ := rand.Int(rand.Reader, big.NewInt(2000))
-	return NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
-}
-
-func newTestL2Docker() GethImgInstance {
-	id, _ := rand.Int(rand.Reader, big.NewInt(2000))
-	return NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
-}
-
-func newTestDBDocker(driverName string) ImgInstance {
-	id, _ := rand.Int(rand.Reader, big.NewInt(2000))
-	return NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
-}
@@ -1,131 +0,0 @@
-package docker
-
-import (
-	"context"
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/docker/docker/api/types/container"
-
-	"scroll-tech/common/cmd"
-	"scroll-tech/common/utils"
-)
-
-// ImgDB the postgres image manager.
-type ImgDB struct {
-	image string
-	name  string
-	id    string
-
-	dbName   string
-	port     int
-	password string
-
-	running bool
-	cmd     *cmd.Cmd
-}
-
-// NewImgDB return postgres db img instance.
-func NewImgDB(image, password, dbName string, port int) ImgInstance {
-	img := &ImgDB{
-		image:    image,
-		name:     fmt.Sprintf("%s-%s_%d", image, dbName, port),
-		password: password,
-		dbName:   dbName,
-		port:     port,
-	}
-	img.cmd = cmd.NewCmd("docker", img.prepare()...)
-	return img
-}
-
-// Start postgres db container.
-func (i *ImgDB) Start() error {
-	id := GetContainerID(i.name)
-	if id != "" {
-		return fmt.Errorf("container already exist, name: %s", i.name)
-	}
-	i.running = i.isOk()
-	if !i.running {
-		_ = i.Stop()
-		return fmt.Errorf("failed to start image: %s", i.image)
-	}
-	return nil
-}
-
-// Stop the container.
-func (i *ImgDB) Stop() error {
-	if !i.running {
-		return nil
-	}
-	i.running = false
-
-	ctx := context.Background()
-	// stop the running container.
-	if i.id == "" {
-		i.id = GetContainerID(i.name)
-	}
-
-	timeoutSec := 3
-	timeout := container.StopOptions{
-		Timeout: &timeoutSec,
-	}
-	if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
-		return err
-	}
-	// remove the stopped container.
-	return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
-}
-
-// Endpoint return the dsn.
-func (i *ImgDB) Endpoint() string {
-	return fmt.Sprintf("postgres://postgres:%s@localhost:%d/%s?sslmode=disable", i.password, i.port, i.dbName)
-}
-
-// IsRunning returns docker container's running status.
-func (i *ImgDB) IsRunning() bool {
-	return i.running
-}
-
-func (i *ImgDB) prepare() []string {
-	cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
-	envs := []string{
-		"-e", "POSTGRES_PASSWORD=" + i.password,
-		"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
-	}
-
-	cmd = append(cmd, envs...)
-	return append(cmd, i.image)
-}
-
-func (i *ImgDB) isOk() bool {
-	keyword := "database system is ready to accept connections"
-	okCh := make(chan struct{}, 1)
-	i.cmd.RegistFunc(keyword, func(buf string) {
-		if strings.Contains(buf, keyword) {
-			select {
-			case okCh <- struct{}{}:
-			default:
-				return
-			}
-		}
-	})
-	defer i.cmd.UnRegistFunc(keyword)
-	// Start cmd in parallel.
-	i.cmd.RunCmd(true)
-
-	select {
-	case <-okCh:
-		utils.TryTimes(20, func() bool {
-			i.id = GetContainerID(i.name)
-			return i.id != ""
-		})
-	case err := <-i.cmd.ErrChan:
-		if err != nil {
-			fmt.Printf("failed to start %s, err: %v\n", i.name, err)
-		}
-	case <-time.After(time.Second * 20):
-		return false
-	}
-	return i.id != ""
-}
@@ -1,174 +0,0 @@
-package docker
-
-import (
-	"context"
-	"fmt"
-	"math/big"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/docker/docker/api/types/container"
-	"github.com/scroll-tech/go-ethereum/ethclient"
-
-	"scroll-tech/common/cmd"
-	"scroll-tech/common/utils"
-)
-
-// ImgGeth the geth image manager include l1geth and l2geth.
-type ImgGeth struct {
-	image string
-	name  string
-	id    string
-
-	volume   string
-	ipcPath  string
-	httpPort int
-	wsPort   int
-	chainID  *big.Int
-
-	running bool
-	cmd     *cmd.Cmd
-}
-
-// NewImgGeth return geth img instance.
-func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
-	img := &ImgGeth{
-		image:    image,
-		name:     fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
-		volume:   volume,
-		ipcPath:  ipc,
-		httpPort: hPort,
-		wsPort:   wPort,
-	}
-	img.cmd = cmd.NewCmd("docker", img.params()...)
-	return img
-}
-
-// Start run image and check if it is running healthily.
-func (i *ImgGeth) Start() error {
-	id := GetContainerID(i.name)
-	if id != "" {
-		return fmt.Errorf("container already exist, name: %s", i.name)
-	}
-	i.running = i.isOk()
-	if !i.running {
-		_ = i.Stop()
-		return fmt.Errorf("failed to start image: %s", i.image)
-	}
-
-	// try 10 times to get chainID until is ok.
-	utils.TryTimes(10, func() bool {
-		client, err := ethclient.Dial(i.Endpoint())
-		if err == nil && client != nil {
-			i.chainID, err = client.ChainID(context.Background())
-			return err == nil && i.chainID != nil
-		}
-		return false
-	})
-
-	return nil
-}
-
-// IsRunning returns docker container's running status.
-func (i *ImgGeth) IsRunning() bool {
-	return i.running
-}
-
-// Endpoint return the connection endpoint.
-func (i *ImgGeth) Endpoint() string {
-	switch true {
-	case i.httpPort != 0:
-		return fmt.Sprintf("http://127.0.0.1:%d", i.httpPort)
-	case i.wsPort != 0:
-		return fmt.Sprintf("ws://127.0.0.1:%d", i.wsPort)
-	default:
-		return i.ipcPath
-	}
-}
-
-// ChainID return chainID.
-func (i *ImgGeth) ChainID() *big.Int {
-	return i.chainID
-}
-
-func (i *ImgGeth) isOk() bool {
-	keyword := "WebSocket enabled"
-	okCh := make(chan struct{}, 1)
-	i.cmd.RegistFunc(keyword, func(buf string) {
-		if strings.Contains(buf, keyword) {
-			select {
-			case okCh <- struct{}{}:
-			default:
-				return
-			}
-		}
-	})
-	defer i.cmd.UnRegistFunc(keyword)
-	// Start cmd in parallel.
-	i.cmd.RunCmd(true)
-
-	select {
-	case <-okCh:
-		utils.TryTimes(20, func() bool {
-			i.id = GetContainerID(i.name)
-			return i.id != ""
-		})
-	case err := <-i.cmd.ErrChan:
-		if err != nil {
-			fmt.Printf("failed to start %s, err: %v\n", i.name, err)
-		}
-	case <-time.After(time.Second * 10):
-		return false
-	}
-	return i.id != ""
-}
-
-// Stop the docker container.
-func (i *ImgGeth) Stop() error {
-	if !i.running {
-		return nil
-	}
-	i.running = false
-
-	ctx := context.Background()
-	// check if container is running, stop the running container.
-	id := GetContainerID(i.name)
-	if id != "" {
-		timeoutSec := 3
-		timeout := container.StopOptions{
-			Timeout: &timeoutSec,
-		}
-		if err := cli.ContainerStop(ctx, id, timeout); err != nil {
-			return err
-		}
-		i.id = id
-	}
-	// remove the stopped container.
-	return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
-}
-
-func (i *ImgGeth) params() []string {
-	cmds := []string{"run", "--rm", "--name", i.name}
-	var ports []string
-	if i.httpPort != 0 {
-		ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)
-	}
-	if i.wsPort != 0 {
-		ports = append(ports, []string{"-p", strconv.Itoa(i.wsPort) + ":8546"}...)
-	}
-
-	var envs []string
-	if i.ipcPath != "" {
-		envs = append(envs, []string{"-e", fmt.Sprintf("IPC_PATH=%s", i.ipcPath)}...)
-	}
-
-	if i.volume != "" {
-		cmds = append(cmds, []string{"-v", fmt.Sprintf("%s:%s", i.volume, i.volume)}...)
-	}
-
-	cmds = append(cmds, ports...)
-	cmds = append(cmds, envs...)
-
-	return append(cmds, i.image)
-}
@@ -1,54 +0,0 @@
-package docker_test
-
-import (
-	"context"
-	"testing"
-
-	"github.com/jmoiron/sqlx"
-	_ "github.com/lib/pq" //nolint:golint
-	"github.com/stretchr/testify/assert"
-
-	"scroll-tech/common/docker"
-)
-
-var (
-	base *docker.App
-)
-
-func TestMain(m *testing.M) {
-	base = docker.NewDockerApp()
-
-	m.Run()
-
-	base.Free()
-}
-
-func TestDB(t *testing.T) {
-	base.RunDBImage(t)
-
-	db, err := sqlx.Open("postgres", base.DBImg.Endpoint())
-	assert.NoError(t, err)
-	assert.NoError(t, db.Ping())
-}
-
-func TestL1Geth(t *testing.T) {
-	base.RunL1Geth(t)
-
-	client, err := base.L1Client()
-	assert.NoError(t, err)
-
-	chainID, err := client.ChainID(context.Background())
-	assert.NoError(t, err)
-	t.Logf("chainId: %s", chainID.String())
-}
-
-func TestL2Geth(t *testing.T) {
-	base.RunL2Geth(t)
-
-	client, err := base.L2Client()
-	assert.NoError(t, err)
-
-	chainID, err := client.ChainID(context.Background())
-	assert.NoError(t, err)
-	t.Logf("chainId: %s", chainID.String())
-}
@@ -9,8 +9,6 @@ require (
	github.com/docker/docker v25.0.3+incompatible
	github.com/gin-contrib/pprof v1.4.0
	github.com/gin-gonic/gin v1.9.1
-	github.com/jmoiron/sqlx v1.3.5
-	github.com/lib/pq v1.10.9
	github.com/mattn/go-colorable v0.1.13
	github.com/mattn/go-isatty v0.0.20
	github.com/modern-go/reflect2 v1.0.2
@@ -18,9 +16,9 @@ require (
	github.com/prometheus/client_golang v1.16.0
	github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
	github.com/stretchr/testify v1.9.0
-	github.com/testcontainers/testcontainers-go v0.29.1
-	github.com/testcontainers/testcontainers-go/modules/compose v0.29.1
-	github.com/testcontainers/testcontainers-go/modules/postgres v0.29.1
+	github.com/testcontainers/testcontainers-go v0.28.0
+	github.com/testcontainers/testcontainers-go/modules/compose v0.28.0
+	github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0
	github.com/urfave/cli/v2 v2.25.7
	gorm.io/driver/postgres v1.5.0
	gorm.io/gorm v1.25.5
@@ -145,7 +143,6 @@ require (
	github.com/mailru/easyjson v0.7.6 // indirect
	github.com/mattn/go-runewidth v0.0.15 // indirect
	github.com/mattn/go-shellwords v1.0.12 // indirect
-	github.com/mattn/go-sqlite3 v1.14.16 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
	github.com/miekg/pkcs11 v1.1.1 // indirect
@@ -268,7 +268,6 @@ github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXS
github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24=
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -400,8 +399,6 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
-github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -443,7 +440,6 @@ github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ic
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
@@ -469,9 +465,6 @@ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -678,12 +671,12 @@ github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZ
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
-github.com/testcontainers/testcontainers-go v0.29.1 h1:z8kxdFlovA2y97RWx98v/TQ+tR+SXZm6p35M+xB92zk=
-github.com/testcontainers/testcontainers-go v0.29.1/go.mod h1:SnKnKQav8UcgtKqjp/AD8bE1MqZm+3TDb/B8crE3XnI=
-github.com/testcontainers/testcontainers-go/modules/compose v0.29.1 h1:47ipPM+s+ltCDOP3Sa1j95AkNb+z+WGiHLDbLU8ixuc=
-github.com/testcontainers/testcontainers-go/modules/compose v0.29.1/go.mod h1:Sqh+Ef2ESdbJQjTJl57UOkEHkOc7gXvQLg1b5xh6f1Y=
-github.com/testcontainers/testcontainers-go/modules/postgres v0.29.1 h1:hTn3MzhR9w4btwfzr/NborGCaeNZG0MPBpufeDj10KA=
-github.com/testcontainers/testcontainers-go/modules/postgres v0.29.1/go.mod h1:YsWyy+pHDgvGdi0axGOx6CGXWsE6eqSaApyd1FYYSSc=
+github.com/testcontainers/testcontainers-go v0.28.0 h1:1HLm9qm+J5VikzFDYhOd+Zw12NtOl+8drH2E8nTY1r8=
+github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
+github.com/testcontainers/testcontainers-go/modules/compose v0.28.0 h1:QOCeTYZIYixg796Ik60MOaeMgpAKPbQd5pJOdTrftyg=
+github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
+github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0 h1:ff0s4JdYIdNAVSi/SrpN2Pdt1f+IjIw3AKjbHau8Un4=
+github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=
common/libzkp/impl/Cargo.lock (generated, 48 lines changed)
@@ -31,7 +31,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0l#837544fb393aa10e6954840494538b1511652bd3"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
 "ark-std 0.3.0",
 "c-kzg",
@@ -521,7 +521,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0l#837544fb393aa10e6954840494538b1511652bd3"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
 "eth-types",
 "ethers-core",
@@ -535,7 +535,7 @@ dependencies = [
 "mock",
 "mpt-zktrie",
 "num",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
 "rand",
 "revm-precompile",
 "serde",
@@ -1139,7 +1139,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0l#837544fb393aa10e6954840494538b1511652bd3"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
 "base64 0.13.1",
 "ethers-core",
@@ -1150,7 +1150,7 @@ dependencies = [
 "itertools 0.11.0",
 "num",
 "num-bigint",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
 "regex",
 "serde",
 "serde_json",
@@ -1293,7 +1293,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0l#837544fb393aa10e6954840494538b1511652bd3"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
 "eth-types",
 "geth-utils",
@@ -1485,7 +1485,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0l#837544fb393aa10e6954840494538b1511652bd3"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
 "eth-types",
 "halo2_proofs",
@@ -1507,7 +1507,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0l#837544fb393aa10e6954840494538b1511652bd3"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
 "env_logger 0.10.0",
 "gobuild",
@@ -1684,7 +1684,7 @@ dependencies = [
 "log",
 "num-bigint",
 "num-traits",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=scroll-dev-1201)",
 "rand",
 "rand_chacha",
 "serde",
@@ -2142,7 +2142,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0l#837544fb393aa10e6954840494538b1511652bd3"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
 "env_logger 0.10.0",
 "eth-types",
@@ -2292,7 +2292,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0l#837544fb393aa10e6954840494538b1511652bd3"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
 "eth-types",
 "ethers-core",
@@ -2307,7 +2307,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0l#837544fb393aa10e6954840494538b1511652bd3"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
 "eth-types",
 "halo2-mpt-circuits",
@@ -2315,7 +2315,7 @@ dependencies = [
 "hex",
 "log",
 "num-bigint",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
 "zktrie",
]
@@ -2671,6 +2671,21 @@ dependencies = [
 "subtle",
]

+[[package]]
+name = "poseidon-circuit"
+version = "0.1.0"
+source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#babf5f6a69bec40b2e6523df317c073dcd0b1f97"
+dependencies = [
+ "bitvec",
+ "ff 0.13.0",
+ "halo2_proofs",
+ "lazy_static",
+ "log",
+ "rand",
+ "rand_xorshift",
+ "thiserror",
+]
+
[[package]]
name = "poseidon-circuit"
version = "0.1.0"
@@ -2754,7 +2769,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0l#837544fb393aa10e6954840494538b1511652bd3"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
 "aggregator",
 "anyhow",
@@ -4441,7 +4456,7 @@ dependencies = [
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0l#837544fb393aa10e6954840494538b1511652bd3"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
dependencies = [
 "array-init",
 "bus-mapping",
@@ -4465,7 +4480,7 @@ dependencies = [
 "mpt-zktrie",
 "num",
 "num-bigint",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
 "rand",
 "rand_chacha",
 "rand_xorshift",
@@ -4494,6 +4509,7 @@ dependencies = [
 "serde",
 "serde_derive",
 "serde_json",
+ "snark-verifier-sdk",
]

[[package]]
@@ -24,7 +24,8 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i

[dependencies]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
-prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.0l", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
+prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }

base64 = "0.13.0"
env_logger = "0.9.0"
@@ -12,6 +12,7 @@ use prover::{
    utils::{chunk_trace_to_witness_block, init_env_and_log},
    BatchProof, BlockTrace, ChunkHash, ChunkProof,
};
+use snark_verifier_sdk::verify_evm_calldata;
use std::{cell::OnceCell, env, ptr::null};

static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -148,11 +149,33 @@ pub unsafe extern "C" fn gen_batch_proof(

/// # Safety
#[no_mangle]
-pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char {
+pub unsafe extern "C" fn verify_batch_proof(
+    proof: *const c_char,
+    fork_name: *const c_char,
+) -> c_char {
    let proof = c_char_to_vec(proof);
    let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();

-    let verified = panic_catch(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
+    let fork_name_str = c_char_to_str(fork_name);
+    let fork_id = match fork_name_str {
+        "" => 0,
+        "shanghai" => 0,
+        "bernoulli" => 1,
+        _ => {
+            log::warn!("unexpected fork_name {fork_name_str}, treated as bernoulli");
+            1
+        }
+    };
+    let verified = panic_catch(|| {
+        if fork_id == 0 {
+            // before upgrade#2(EIP4844)
+            verify_evm_calldata(
+                include_bytes!("evm_verifier_fork_1.bin").to_vec(),
+                proof.calldata(),
+            )
+        } else {
+            VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
+        }
+    });
    verified.unwrap_or(false) as c_char
}
BIN common/libzkp/impl/src/evm_verifier_fork_1.bin (new file; binary file not shown)
@@ -3,7 +3,7 @@ void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char* check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
-char verify_batch_proof(char* proof);
+char verify_batch_proof(char* proof, char* fork_name);

void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
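For context, this is how the updated export might be called from Go via cgo. This is a minimal sketch, not code from this PR: the package name, the `libzkp.h` include path, and the linker flags are assumptions.

```go
package libzkp

/*
#cgo LDFLAGS: -lzkp
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"
import "unsafe"

// VerifyBatchProof passes the serialized proof and the new fork_name argument
// to the Rust library; per the Rust change above, "" and "shanghai" select the
// bundled pre-EIP-4844 EVM verifier, anything else the current verifier.
func VerifyBatchProof(proofJSON, forkName string) bool {
	cProof := C.CString(proofJSON)
	defer C.free(unsafe.Pointer(cProof))
	cFork := C.CString(forkName)
	defer C.free(unsafe.Pointer(cFork))
	return C.verify_batch_proof(cProof, cFork) != 0
}
```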
@@ -10,6 +10,9 @@ import (
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
+	"gorm.io/gorm"
+
+	"scroll-tech/common/database"
)

// TestcontainerApps testcontainers struct
@@ -129,6 +132,21 @@ func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
	return endpoint, nil
}

+// GetGormDBClient returns a gorm.DB by connecting to the running postgres container
+func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
+	endpoint, err := t.GetDBEndPoint()
+	if err != nil {
+		return nil, err
+	}
+	dbCfg := &database.Config{
+		DSN:        endpoint,
+		DriverName: "postgres",
+		MaxOpenNum: 200,
+		MaxIdleNum: 20,
+	}
+	return database.InitDB(dbCfg)
+}
+
// GetL1GethClient returns a ethclient by dialing running L1Geth
func (t *TestcontainerApps) GetL1GethClient() (*ethclient.Client, error) {
	endpoint, err := t.GetL1GethEndPoint()
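A minimal usage sketch for the new helper, mirroring the rewritten common/database test earlier in this diff; the test name is hypothetical:

```go
func TestWithPostgres(t *testing.T) { // hypothetical test
	testApps := testcontainers.NewTestcontainerApps()
	defer testApps.Free() // recycle the containers when done

	// start the postgres container, then get a ready *gorm.DB backed by it
	assert.NoError(t, testApps.StartPostgresContainer())
	db, err := testApps.GetGormDBClient()
	assert.NoError(t, err)
	assert.NotNil(t, db)
}
```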
@@ -5,14 +5,16 @@ import (

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/stretchr/testify/assert"
+	"gorm.io/gorm"
)

// TestNewTestcontainerApps tests NewTestcontainerApps
func TestNewTestcontainerApps(t *testing.T) {
	var (
-		err      error
-		endpoint string
-		client   *ethclient.Client
+		err          error
+		endpoint     string
+		gormDBclient *gorm.DB
+		ethclient    *ethclient.Client
	)

	// test start testcontainers
@@ -21,22 +23,25 @@ func TestNewTestcontainerApps(t *testing.T) {
	endpoint, err = testApps.GetDBEndPoint()
	assert.NoError(t, err)
	assert.NotEmpty(t, endpoint)
+	gormDBclient, err = testApps.GetGormDBClient()
+	assert.NoError(t, err)
+	assert.NotNil(t, gormDBclient)

	assert.NoError(t, testApps.StartL1GethContainer())
	endpoint, err = testApps.GetL1GethEndPoint()
	assert.NoError(t, err)
	assert.NotEmpty(t, endpoint)
-	client, err = testApps.GetL1GethClient()
+	ethclient, err = testApps.GetL1GethClient()
	assert.NoError(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, ethclient)

	assert.NoError(t, testApps.StartL2GethContainer())
	endpoint, err = testApps.GetL2GethEndPoint()
	assert.NoError(t, err)
	assert.NotEmpty(t, endpoint)
-	client, err = testApps.GetL2GethClient()
+	ethclient, err = testApps.GetL2GethClient()
	assert.NoError(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, ethclient)

	// test free testcontainers
	testApps.Free()
@@ -10,7 +10,9 @@ import (
	"scroll-tech/common/types/encoding"
	"scroll-tech/common/types/encoding/codecv0"

+	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
+	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
	"github.com/stretchr/testify/assert"
)

@@ -528,6 +530,76 @@ func TestCodecV1BatchChallenge(t *testing.T) {
	assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c43", hex.EncodeToString(batch.z[:]))
}

+func repeat(element byte, count int) string {
+	result := make([]byte, 0, count)
+	for i := 0; i < count; i++ {
+		result = append(result, element)
+	}
+	return "0x" + common.Bytes2Hex(result)
+}
+
+func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
+	nRowsData := 126914
+
+	for _, tc := range []struct {
+		chunks    [][]string
+		expectedz string
+		expectedy string
+	}{
+		// single empty chunk
+		{chunks: [][]string{{}}, expectedz: "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", expectedy: "28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df51"},
+		// single non-empty chunk
+		{chunks: [][]string{{"0x010203"}}, expectedz: "30a9d6cfc2b87fb00d80e7fea28ebb9eff0bd526dbf1da32acfe8c5fd49632ff", expectedy: "723515444cb320fe437b9cea3b51293f5fbcb5913739ad35eab28b1863f7c312"},
+		// multiple empty chunks
+		{chunks: [][]string{{}, {}}, expectedz: "17772348f946a4e4adfcaf5c1690d078933b6b090ca9a52fab6c7e545b1007ae", expectedy: "05ba9abbc81a1c97f4cdaa683a7e0c731d9dfd88feef8f7b2fcfd79e593662b5"},
+		// multiple non-empty chunks
+		{chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "60376321eea0886c29bd97d95851c7b5fbdb064c8adfdadd7678617b32b3ebf2", expectedy: "50cfbcece01cadb4eade40649e17b140b31f96088097e38f020e31dfe6551604"},
+		// empty chunk followed by non-empty chunk
+		{chunks: [][]string{{}, {"0x010203"}}, expectedz: "054539f03564eda9462d582703cde0788e4e27c311582ddfb19835358273a7ca", expectedy: "1fba03580b5908c4c66b48e79c10e7a34e4b27ed37a1a049b3e17e017cad5245"},
+		// non-empty chunk followed by empty chunk
+		{chunks: [][]string{{"0x070809"}, {}}, expectedz: "0b82dceaa6ca4b5d704590c921accfd991b56b5ad0212e6a4e63e54915a2053b", expectedy: "2362f3a0c87f0ea11eb898ed608c7f09a42926a058d4c5d111a0f54cad10ebbd"},
+		// max number of chunks all empty
+		{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "174cd3ba9b2ae8ab789ec0b5b8e0b27ee122256ec1756c383dbf2b5b96903f1b", expectedy: "225cab9658904181671eb7abc342ffc36a6836048b64a67f0fb758439da2567b"},
+		// max number of chunks all non-empty
+		{chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1e93e961cdfb4bd26a5be48f23af4f1aa8c6bebe57a089d3250f8afb1e988bf8", expectedy: "24ed4791a70b28a6bad21c22d58f82a5ea5f9f9d2bcfc07428b494e9ae93de6e"},
+		// single chunk blob full
+		{chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "61405cb0b114dfb4d611be84bedba0fcd2e55615e193e424f1cc7b1af0df3d31", expectedy: "58609bbca10e50489b630ecb5b9347378579ed784d6a10749fd505055d35c3c0"},
+		// multiple chunks blob full
+		{chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "22533c3ea99536b4b83a89835aa91e6f0d2fc3866c201e18d7ca4b3af92fad61", expectedy: "40d4b71492e1a06ee3c273ef9003c7cb05aed021208871e13fa33302fa0f4dcc"},
+		// max number of chunks only last one non-empty not full blob
+		{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "0e6525c0dd261e8f62342b1139062bb23bc2b8b460163364598fb29e82a4eed5", expectedy: "1db984d6deb5e84bc67d0755aa2da8fe687233147603b4ecba94d0c8463c3836"},
+		// max number of chunks only last one non-empty full blob
+		{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "3a638eac98f22f817b84e3d81ccaa3de080f83dc80a5823a3f19320ef3cb6fc8", expectedy: "73ab100278822144e2ed8c9d986e92f7a2662fd18a51bdf96ec55848578b227a"},
+		// max number of chunks but last is empty
+		{chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "02ef442d99f450559647a7823f1be0e148c75481cc5c703c02a116e8ac531fa8", expectedy: "31743538cfc3ac43d1378a5c497ebc9462c20b4cb4470e0e7a9f7342ea948333"},
+	} {
+		chunks := []*encoding.Chunk{}
+
+		for _, c := range tc.chunks {
+			block := &encoding.Block{Transactions: []*types.TransactionData{}}
+
+			for _, data := range c {
+				tx := &types.TransactionData{Type: 0xff, Data: data}
+				block.Transactions = append(block.Transactions, tx)
+			}
+
+			chunk := &encoding.Chunk{Blocks: []*encoding.Block{block}}
+			chunks = append(chunks, chunk)
+		}
+
+		b, z, err := constructBlobPayload(chunks)
+		assert.NoError(t, err)
+		actualZ := hex.EncodeToString(z[:])
+		assert.Equal(t, tc.expectedz, actualZ)
+
+		_, y, err := kzg4844.ComputeProof(*b, *z)
+		assert.NoError(t, err)
+		actualY := hex.EncodeToString(y[:])
+		assert.Equal(t, tc.expectedy, actualY)
+	}
+}
+
func TestCodecV1BatchBlobDataProof(t *testing.T) {
	trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
	chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
@@ -6,6 +6,7 @@ import (
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/core/types"
+	"github.com/scroll-tech/go-ethereum/log"
)

// CodecVersion defines the version of encoder and decoder.
@@ -17,8 +18,18 @@ const (

	// CodecV1 represents the version 1 of the encoder and decoder.
	CodecV1
+
+	// txTypeTest is a special transaction type used in unit tests.
+	txTypeTest = 0xff
)

+func init() {
+	// make sure txTypeTest will not interfere with other transaction types
+	if txTypeTest == types.LegacyTxType || txTypeTest == types.AccessListTxType || txTypeTest == types.DynamicFeeTxType || txTypeTest == types.BlobTxType || txTypeTest == types.L1MessageTxType {
+		log.Crit("txTypeTest is overlapping with existing transaction types")
+	}
+}
+
// Block represents an L2 block.
type Block struct {
	Header *types.Header
@@ -134,6 +145,10 @@ func ConvertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
		S: txData.S.ToInt(),
	})

+	case txTypeTest:
+		// in the tests, we simply use `data` as the RLP-encoded transaction
+		return data, nil
+
	case types.L1MessageTxType: // L1MessageTxType is not supported
	default:
		return nil, fmt.Errorf("unsupported tx type: %d", txData.Type)
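A short sketch of how the test-only type flows through the encoder, assuming `ConvertTxDataToRLPEncoding` decodes `txData.Data` into `data` before the switch, as the surrounding code suggests; the values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/core/types"

	"scroll-tech/common/types/encoding"
)

func main() {
	// 0xff is txTypeTest; this mirrors how the codecv1 tests above construct
	// transactions. For this type the Data payload is treated as the
	// already-RLP-encoded transaction and returned without reconstruction.
	tx := &types.TransactionData{Type: 0xff, Data: "0x010203"}
	raw, err := encoding.ConvertTxDataToRLPEncoding(tx)
	fmt.Printf("%x %v\n", raw, err) // expected: 010203 <nil>
}
```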
common/types/message/auth_msg.go (new file, 91 lines)
@@ -0,0 +1,91 @@
+package message
+
+import (
+	"crypto/ecdsa"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/common/hexutil"
+	"github.com/scroll-tech/go-ethereum/crypto"
+	"github.com/scroll-tech/go-ethereum/rlp"
+)
+
+// AuthMsg is the first message exchanged from the Prover to the Sequencer.
+// It effectively acts as a registration, and makes the Prover identification
+// known to the Sequencer.
+type AuthMsg struct {
+	// Message fields
+	Identity *Identity `json:"message"`
+	// Prover signature
+	Signature string `json:"signature"`
+}
+
+// Identity contains all the fields to be signed by the prover.
+type Identity struct {
+	// ProverName the prover name
+	ProverName string `json:"prover_name"`
+	// ProverVersion the prover version
+	ProverVersion string `json:"prover_version"`
+	// Challenge unique challenge generated by manager
+	Challenge string `json:"challenge"`
+	// HardForkName the hard fork name
+	HardForkName string `json:"hard_fork_name"`
+}
+
+// SignWithKey auth message with private key and set public key in auth message's Identity
+func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
+	// Hash identity content
+	hash, err := a.Identity.Hash()
+	if err != nil {
+		return err
+	}
+
+	// Sign register message
+	sig, err := crypto.Sign(hash, priv)
+	if err != nil {
+		return err
+	}
+	a.Signature = hexutil.Encode(sig)
+
+	return nil
+}
+
+// Verify verifies the message of auth.
+func (a *AuthMsg) Verify() (bool, error) {
+	hash, err := a.Identity.Hash()
+	if err != nil {
+		return false, err
+	}
+	sig := common.FromHex(a.Signature)
+
+	pk, err := crypto.SigToPub(hash, sig)
+	if err != nil {
+		return false, err
+	}
+	return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
+}
+
+// PublicKey return public key from signature
+func (a *AuthMsg) PublicKey() (string, error) {
+	hash, err := a.Identity.Hash()
+	if err != nil {
+		return "", err
+	}
+	sig := common.FromHex(a.Signature)
+	// recover public key
+	pk, err := crypto.SigToPub(hash, sig)
+	if err != nil {
+		return "", err
+	}
+	return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
+}
+
+// Hash returns the hash of the auth message, which should be the message used
+// to construct the Signature.
+func (i *Identity) Hash() ([]byte, error) {
+	byt, err := rlp.EncodeToBytes(i)
+	if err != nil {
+		return nil, err
+	}
+	hash := crypto.Keccak256Hash(byt)
+	return hash[:], nil
+}
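A minimal round-trip sketch of the new message type; the identity values are illustrative, and in practice the challenge comes from the coordinator:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"

	"scroll-tech/common/types/message"
)

func main() {
	priv, err := crypto.GenerateKey() // throwaway key for the example
	if err != nil {
		panic(err)
	}
	auth := &message.AuthMsg{
		Identity: &message.Identity{
			ProverName:    "example-prover", // illustrative values
			ProverVersion: "v4.3.90",
			Challenge:     "challenge-from-coordinator",
			HardForkName:  "bernoulli",
		},
	}
	// sign over Keccak256(RLP(Identity)), then recover and check the signature
	if err := auth.SignWithKey(priv); err != nil {
		panic(err)
	}
	ok, err := auth.Verify()
	pk, _ := auth.PublicKey() // compressed pubkey recovered from the signature
	fmt.Println(ok, err, pk)
}
```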
common/types/message/legacy_auth_msg.go (new file, 89 lines)
@@ -0,0 +1,89 @@
+package message
+
+import (
+	"crypto/ecdsa"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/common/hexutil"
+	"github.com/scroll-tech/go-ethereum/crypto"
+	"github.com/scroll-tech/go-ethereum/rlp"
+)
+
+// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer.
+// It effectively acts as a registration, and makes the Prover identification
+// known to the Sequencer.
+type LegacyAuthMsg struct {
+	// Message fields
+	Identity *LegacyIdentity `json:"message"`
+	// Prover signature
+	Signature string `json:"signature"`
+}
+
+// LegacyIdentity contains all the fields to be signed by the prover.
+type LegacyIdentity struct {
+	// ProverName the prover name
+	ProverName string `json:"prover_name"`
+	// ProverVersion the prover version
+	ProverVersion string `json:"prover_version"`
+	// Challenge unique challenge generated by manager
+	Challenge string `json:"challenge"`
+}
+
+// SignWithKey auth message with private key and set public key in auth message's Identity
+func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
+	// Hash identity content
+	hash, err := a.Identity.Hash()
+	if err != nil {
+		return err
+	}
+
+	// Sign register message
+	sig, err := crypto.Sign(hash, priv)
+	if err != nil {
+		return err
+	}
+	a.Signature = hexutil.Encode(sig)
+
+	return nil
+}
+
+// Verify verifies the message of auth.
+func (a *LegacyAuthMsg) Verify() (bool, error) {
+	hash, err := a.Identity.Hash()
+	if err != nil {
+		return false, err
+	}
+	sig := common.FromHex(a.Signature)
+
+	pk, err := crypto.SigToPub(hash, sig)
+	if err != nil {
+		return false, err
+	}
+	return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
+}
+
+// PublicKey return public key from signature
+func (a *LegacyAuthMsg) PublicKey() (string, error) {
+	hash, err := a.Identity.Hash()
+	if err != nil {
+		return "", err
+	}
+	sig := common.FromHex(a.Signature)
+	// recover public key
+	pk, err := crypto.SigToPub(hash, sig)
+	if err != nil {
+		return "", err
+	}
+	return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
+}
+
+// Hash returns the hash of the auth message, which should be the message used
+// to construct the Signature.
+func (i *LegacyIdentity) Hash() ([]byte, error) {
+	byt, err := rlp.EncodeToBytes(i)
+	if err != nil {
+		return nil, err
+	}
+	hash := crypto.Keccak256Hash(byt)
+	return hash[:], nil
+}
@@ -58,26 +58,6 @@ const (
|
||||
ProofTypeBatch
|
||||
)
|
||||
|
||||
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
|
||||
// It effectively acts as a registration, and makes the Prover identification
|
||||
// known to the Sequencer.
|
||||
type AuthMsg struct {
|
||||
// Message fields
|
||||
Identity *Identity `json:"message"`
|
||||
// Prover signature
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
// Identity contains all the fields to be signed by the prover.
|
||||
type Identity struct {
|
||||
// ProverName the prover name
|
||||
ProverName string `json:"prover_name"`
|
||||
// ProverVersion the prover version
|
||||
ProverVersion string `json:"prover_version"`
|
||||
// Challenge unique challenge generated by manager
|
||||
Challenge string `json:"challenge"`
|
||||
}
|
||||
|
||||
// GenerateToken generates token
|
||||
func GenerateToken() (string, error) {
|
||||
b := make([]byte, 16)
|
||||
@@ -87,65 +67,6 @@ func GenerateToken() (string, error) {
|
||||
return hex.EncodeToString(b), nil
|
||||
}

// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
    // Hash identity content
    hash, err := a.Identity.Hash()
    if err != nil {
        return err
    }

    // Sign register message
    sig, err := crypto.Sign(hash, priv)
    if err != nil {
        return err
    }
    a.Signature = hexutil.Encode(sig)

    return nil
}

// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
    hash, err := a.Identity.Hash()
    if err != nil {
        return false, err
    }
    sig := common.FromHex(a.Signature)

    pk, err := crypto.SigToPub(hash, sig)
    if err != nil {
        return false, err
    }
    return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}

// PublicKey return public key from signature
func (a *AuthMsg) PublicKey() (string, error) {
    hash, err := a.Identity.Hash()
    if err != nil {
        return "", err
    }
    sig := common.FromHex(a.Signature)
    // recover public key
    pk, err := crypto.SigToPub(hash, sig)
    if err != nil {
        return "", err
    }
    return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
    byt, err := rlp.EncodeToBytes(i)
    if err != nil {
        return nil, err
    }
    hash := crypto.Keccak256Hash(byt)
    return hash[:], nil
}

// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
    *ProofDetail `json:"zkProof"`

@@ -54,7 +54,7 @@ func TestIdentityHash(t *testing.T) {
    hash, err := identity.Hash()
    assert.NoError(t, err)

    expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721"
    expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2"
    assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

@@ -5,7 +5,7 @@ import (
    "runtime/debug"
)

var tag = "v4.3.82"
var tag = "v4.3.90"

var commit = func() string {
    if info, ok := debug.ReadBuildInfo(); ok {

@@ -2,6 +2,8 @@

pragma solidity ^0.8.24;

/// @title IScrollChain
/// @notice The interface for ScrollChain.
interface IScrollChain {
    /**********
     * Events *
@@ -43,23 +45,23 @@ interface IScrollChain {
     * Public View Functions *
     *************************/

    /// @notice The latest finalized batch index.
    /// @return The latest finalized batch index.
    function lastFinalizedBatchIndex() external view returns (uint256);

    /// @notice Return the batch hash of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The batch hash of a committed batch.
    function committedBatches(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return the state root of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The state root of a committed batch.
    function finalizedStateRoots(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return the message root of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The message root of a committed batch.
    function withdrawRoots(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return whether the batch is finalized by batch index.
    /// @param batchIndex The index of the batch.
    /// @return Whether the batch is finalized by batch index.
    function isBatchFinalized(uint256 batchIndex) external view returns (bool);

    /*****************************
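
These view functions are straightforward to query off-chain. A hedged Go sketch, not from this repo: the RPC endpoint and contract address are placeholders, and the ABI fragment is hand-written from the interface above:

    package main

    import (
        "context"
        "fmt"
        "math/big"
        "strings"

        "github.com/ethereum/go-ethereum"
        "github.com/ethereum/go-ethereum/accounts/abi"
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/ethclient"
    )

    // Minimal ABI fragment for one of the view functions above.
    const scrollChainABI = `[{"inputs":[],"name":"lastFinalizedBatchIndex","outputs":[{"type":"uint256"}],"stateMutability":"view","type":"function"}]`

    func main() {
        client, err := ethclient.Dial("https://rpc.example.org") // placeholder endpoint
        if err != nil {
            panic(err)
        }
        parsed, _ := abi.JSON(strings.NewReader(scrollChainABI))
        data, _ := parsed.Pack("lastFinalizedBatchIndex")

        addr := common.HexToAddress("0x0000000000000000000000000000000000000000") // placeholder ScrollChain address
        out, err := client.CallContract(context.Background(), ethereum.CallMsg{To: &addr, Data: data}, nil)
        if err != nil {
            panic(err)
        }
        // A single uint256 return value is just one 32-byte big-endian word.
        fmt.Println("last finalized batch index:", new(big.Int).SetBytes(out))
    }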

@@ -8,6 +8,8 @@ import {IScrollChain} from "./IScrollChain.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";

/// @title MultipleVersionRollupVerifier
/// @notice Verifies aggregate zk proofs using the appropriate verifier.
contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /**********
     * Events *
@@ -37,7 +39,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     *************/

    /// @notice The address of ScrollChain contract.
    address immutable scrollChain;
    address public immutable scrollChain;

    /***********
     * Structs *
@@ -58,7 +60,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /// The verifiers are sorted by batchIndex in increasing order.
    mapping(uint256 => Verifier[]) public legacyVerifiers;

    /// @notice Mapping from verifier version to the lastest used zkevm verifier.
    /// @notice Mapping from verifier version to the latest used zkevm verifier.
    mapping(uint256 => Verifier) public latestVerifier;

    /***************
@@ -86,6 +88,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     *************************/

    /// @notice Return the number of legacy verifiers.
    /// @param _version The version of legacy verifiers.
    /// @return The number of legacy verifiers.
    function legacyVerifiersLength(uint256 _version) external view returns (uint256) {
        return legacyVerifiers[_version].length;
    }
@@ -93,6 +97,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /// @notice Compute the verifier should be used for specific batch.
    /// @param _version The version of verifier to query.
    /// @param _batchIndex The batch index to query.
    /// @return The address of verifier.
    function getVerifier(uint256 _version, uint256 _batchIndex) public view returns (address) {
        // Normally, we will use the latest verifier.
        Verifier memory _verifier = latestVerifier[_version];
@@ -144,6 +149,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     ************************/

    /// @notice Update the address of zkevm verifier.
    /// @param _version The version of the verifier.
    /// @param _startBatchIndex The start batch index when the verifier will be used.
    /// @param _verifier The address of new verifier.
    function updateVerifier(

@@ -115,11 +115,11 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     *************/

    /// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
    address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
    address private constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);

    /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
    /// point evaluation precompile
    uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
    uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;

    /// @notice The chain id of the corresponding layer 2 chain.
    uint64 public immutable layer2ChainId;
@@ -236,6 +236,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     *****************************/

    /// @notice Import layer 2 genesis block
    /// @param _batchHeader The header of the genesis batch.
    /// @param _stateRoot The state root of the genesis block.
    function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
        // check genesis batch header length
        if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();
@@ -877,7 +879,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
                uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr);
                if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
                unchecked {
                    _totalTransactionsInChunk += dataPtr - startPtr; // number of non-skipped l1 messages
                    _totalTransactionsInChunk += (dataPtr - startPtr) / 32; // number of non-skipped l1 messages
                    _totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs
                    _totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
                    _totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
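
The one-line change in the unchecked block is a real counting fix: dataPtr advances 32 bytes for every non-skipped L1 message hash it writes, so dataPtr - startPtr is a byte length, not a message count, and dividing by 32 converts it. A sketch of the arithmetic (not from the repo):

    package main

    import "fmt"

    func main() {
        const hashSize = 32 // each non-skipped L1 message contributes one 32-byte hash
        startPtr := uint64(0x1000)
        numL1Messages := uint64(5)
        dataPtr := startPtr + numL1Messages*hashSize // pointer after writing 5 hashes

        buggy := dataPtr - startPtr              // 160: counts bytes, inflating the total
        fixed := (dataPtr - startPtr) / hashSize // 5: the actual message count
        fmt.Println(buggy, fixed)
    }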

@@ -2,6 +2,8 @@

pragma solidity ^0.8.24;

/// @title IRollupVerifier
/// @notice The interface for rollup verifier.
interface IRollupVerifier {
    /// @notice Verify aggregate zk proof.
    /// @param batchIndex The batch index to verify.

@@ -199,7 +199,7 @@ library PatriciaMerkleTrieVerifier {
            }

            // decodes all RLP encoded data and stores their DATA items
            // [length - 128 bits | calldata offset - 128 bits] in a continous memory region.
            // [length - 128 bits | calldata offset - 128 bits] in a continuous memory region.
            // Expects that the RLP starts with a list that defines the length
            // of the whole RLP region.
            function decodeFlat(_ptr) -> ptr, memStart, nItems, hash {
@@ -505,7 +505,7 @@ library PatriciaMerkleTrieVerifier {
            }

            // the one and only boundary check
            // in case an attacker crafted a malicous payload
            // in case an attacker crafted a malicious payload
            // and succeeds in the prior verification steps
            // then this should catch any bogus accesses
            if iszero(eq(ptr, add(proof.offset, proof.length))) {

@@ -12,11 +12,11 @@ import (

    "github.com/scroll-tech/go-ethereum/params"

    coordinatorConfig "scroll-tech/coordinator/internal/config"

    "scroll-tech/common/cmd"
    "scroll-tech/common/docker"
    "scroll-tech/common/testcontainers"
    "scroll-tech/common/utils"

    coordinatorConfig "scroll-tech/coordinator/internal/config"
)

var (
@@ -28,7 +28,7 @@ type CoordinatorApp struct {
    Config *coordinatorConfig.Config
    ChainConfig *params.ChainConfig

    base *docker.App
    testApps *testcontainers.TestcontainerApps

    configOriginFile string
    chainConfigOriginFile string
@@ -37,17 +37,17 @@ type CoordinatorApp struct {
    HTTPPort int64

    args []string
    docker.AppAPI
    *cmd.Cmd
}

// NewCoordinatorApp return a new coordinatorApp manager.
func NewCoordinatorApp(base *docker.App, configFile string, chainConfigFile string) *CoordinatorApp {
    coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", base.Timestamp)
    genesisFile := fmt.Sprintf("/tmp/%d_genesis.json", base.Timestamp)
func NewCoordinatorApp(testApps *testcontainers.TestcontainerApps, configFile string, chainConfigFile string) *CoordinatorApp {
    coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", testApps.Timestamp)
    genesisFile := fmt.Sprintf("/tmp/%d_genesis.json", testApps.Timestamp)
    port, _ := rand.Int(rand.Reader, big.NewInt(2000))
    httpPort := port.Int64() + httpStartPort
    coordinatorApp := &CoordinatorApp{
        base: base,
        testApps: testApps,
        configOriginFile: configFile,
        chainConfigOriginFile: chainConfigFile,
        coordinatorFile: coordinatorFile,
@@ -63,14 +63,14 @@ func NewCoordinatorApp(base *docker.App, configFile string, chainConfigFile stri

// RunApp run coordinator-test child process by multi parameters.
func (c *CoordinatorApp) RunApp(t *testing.T, args ...string) {
    c.AppAPI = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
    c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
    c.Cmd = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
    c.Cmd.RunApp(func() bool { return c.Cmd.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
}

// Free stop and release coordinator-test.
func (c *CoordinatorApp) Free() {
    if !utils.IsNil(c.AppAPI) {
        c.AppAPI.WaitExit()
    if !utils.IsNil(c.Cmd) {
        c.Cmd.WaitExit()
    }
    _ = os.Remove(c.coordinatorFile)
}
@@ -82,7 +82,6 @@ func (c *CoordinatorApp) HTTPEndpoint() string {

// MockConfig creates a new coordinator config.
func (c *CoordinatorApp) MockConfig(store bool) error {
    base := c.base
    cfg, err := coordinatorConfig.NewConfig(c.configOriginFile)
    if err != nil {
        return err
@@ -97,7 +96,11 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
        MaxVerifierWorkers: 4,
        MinProverVersion: "v1.0.0",
    }
    cfg.DB.DSN = base.DBImg.Endpoint()
    endpoint, err := c.testApps.GetDBEndPoint()
    if err != nil {
        return err
    }
    cfg.DB.DSN = endpoint
    cfg.L2.ChainID = 111
    cfg.Auth.ChallengeExpireDurationSec = 1
    cfg.Auth.LoginExpireDurationSec = 1

@@ -5,6 +5,7 @@
    "batch_collection_time_sec": 180,
    "chunk_collection_time_sec": 180,
    "verifier": {
        "fork_name": "bernoulli",
        "mock_mode": true,
        "params_path": "",
        "assets_path": ""

@@ -50,6 +50,7 @@ type Config struct {

// VerifierConfig load zk verifier config.
type VerifierConfig struct {
    ForkName string `json:"fork_name"`
    MockMode bool `json:"mock_mode"`
    ParamsPath string `json:"params_path"`
    AssetsPath string `json:"assets_path"`

@@ -53,25 +53,44 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
        return jwt.MapClaims{}
    }

    // recover the public key
    authMsg := message.AuthMsg{
        Identity: &message.Identity{
            Challenge: v.Message.Challenge,
            ProverName: v.Message.ProverName,
            ProverVersion: v.Message.ProverVersion,
        },
        Signature: v.Signature,
    var publicKey string
    var err error
    if v.Message.HardForkName != "" {
        authMsg := message.AuthMsg{
            Identity: &message.Identity{
                Challenge: v.Message.Challenge,
                ProverName: v.Message.ProverName,
                ProverVersion: v.Message.ProverVersion,
                HardForkName: v.Message.HardForkName,
            },
            Signature: v.Signature,
        }
        publicKey, err = authMsg.PublicKey()
    } else {
        authMsg := message.LegacyAuthMsg{
            Identity: &message.LegacyIdentity{
                Challenge: v.Message.Challenge,
                ProverName: v.Message.ProverName,
                ProverVersion: v.Message.ProverVersion,
            },
            Signature: v.Signature,
        }
        publicKey, err = authMsg.PublicKey()
    }

    publicKey, err := authMsg.PublicKey()
    if err != nil {
        return jwt.MapClaims{}
    }

    if v.Message.HardForkName == "" {
        v.Message.HardForkName = "shanghai"
    }

    return jwt.MapClaims{
        types.PublicKey: publicKey,
        types.ProverName: v.Message.ProverName,
        types.ProverVersion: v.Message.ProverVersion,
        types.HardForkName: v.Message.HardForkName,
    }
}

@@ -89,5 +108,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
    if proverVersion, ok := claims[types.ProverVersion]; ok {
        c.Set(types.ProverVersion, proverVersion)
    }

    if hardForkName, ok := claims[types.HardForkName]; ok {
        c.Set(types.HardForkName, hardForkName)
    }
    return nil
}
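
The branch above is the whole backward-compatibility story: a login that carries hard_fork_name is verified against the new Identity (whose hash covers the extra field), while one without it falls back to the legacy message and is pinned to "shanghai" before the claims are issued. A condensed sketch of the fallback, with the repo's types reduced to strings:

    package main

    import "fmt"

    // resolveHardFork mirrors the PayloadFunc default above: an empty
    // hard_fork_name from an old prover is treated as "shanghai".
    func resolveHardFork(requested string) string {
        if requested == "" {
            return "shanghai"
        }
        return requested
    }

    func main() {
        fmt.Println(resolveHardFork(""))          // shanghai (legacy prover)
        fmt.Println(resolveHardFork("bernoulli")) // bernoulli (upgraded prover)
    }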

@@ -2,6 +2,7 @@ package api

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/scroll-tech/go-ethereum/params"
    "gorm.io/gorm"

@@ -25,6 +26,8 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
        panic("proof receiver new verifier failure")
    }

    log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)

    Auth = NewAuthController(db)
    GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
    SubmitProof = NewSubmitProofController(cfg, db, vf, reg)

@@ -6,6 +6,8 @@ import (

    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/scroll-tech/go-ethereum/params"
    "gorm.io/gorm"

@@ -21,15 +23,21 @@ import (
// GetTaskController the get prover task api controller
type GetTaskController struct {
    proverTasks map[message.ProofType]provertask.ProverTask

    getTaskAccessCounter *prometheus.CounterVec
}

// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
    chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVK, reg)
    batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVK, reg)
    chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg)
    batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg)

    ptc := &GetTaskController{
        proverTasks: make(map[message.ProofType]provertask.ProverTask),
        getTaskAccessCounter: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
            Name: "coordinator_get_task_access_count",
            Help: "Multi dimensions get task counter.",
        }, []string{coordinatorType.LabelProverName, coordinatorType.LabelProverPublicKey, coordinatorType.LabelProverVersion}),
    }

    ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
@@ -38,6 +46,28 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
    return ptc
}

func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
    publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
    if !publicKeyExist {
        return fmt.Errorf("get public key from context failed")
    }
    proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
    if !proverNameExist {
        return fmt.Errorf("get prover name from context failed")
    }
    proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
    if !proverVersionExist {
        return fmt.Errorf("get prover version from context failed")
    }

    ptc.getTaskAccessCounter.With(prometheus.Labels{
        coordinatorType.LabelProverPublicKey: publicKey.(string),
        coordinatorType.LabelProverName: proverName.(string),
        coordinatorType.LabelProverVersion: proverVersion.(string),
    }).Inc()
    return nil
}

// GetTasks get assigned chunk/batch task
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
    var getTaskParameter coordinatorType.GetTaskParameter
@@ -55,6 +85,10 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
        return
    }

    if err := ptc.incGetTaskAccessCounter(ctx); err != nil {
        log.Warn("get_task access counter inc failed", "error", err.Error())
    }

    result, err := proverTask.Assign(ctx, &getTaskParameter)
    if err != nil {
        nerr := fmt.Errorf("return prover task err:%w", err)

@@ -31,16 +31,17 @@ type BatchProverTask struct {

    batchAttemptsExceedTotal prometheus.Counter
    batchTaskGetTaskTotal *prometheus.CounterVec
    batchTaskGetTaskProver *prometheus.CounterVec
}

// NewBatchProverTask new a batch collector
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask {
    forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
    log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)

    bp := &BatchProverTask{
        BaseProverTask: BaseProverTask{
            vk: vk,
            vkMap: vkMap,
            db: db,
            cfg: cfg,
            nameForkMap: nameForkMap,
@@ -58,6 +59,7 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
            Name: "coordinator_batch_get_task_total",
            Help: "Total number of batch get task.",
        }, []string{"fork_name"}),
        batchTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "batch"),
    }
    return bp
}
@@ -69,9 +71,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
        return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
    }

    hardForkNumber, err := bp.getHardForkNumberByName(getTaskParameter.HardForkName)
    hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
    if err != nil {
        log.Error("batch assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
        log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
        return nil, err
    }

@@ -83,7 +85,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
    if fromBlockNum != 0 {
        startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, fromBlockNum)
        if chunkErr != nil {
            log.Error("failed to get fork start chunk index", "forkName", getTaskParameter.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
            log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
            return nil, ErrCoordinatorInternalFailure
        }
        if startChunk == nil {
@@ -93,8 +95,8 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
    }
    if toBlockNum != math.MaxInt64 {
        toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, toBlockNum)
        if err != nil {
            log.Error("failed to get fork end chunk index", "forkName", getTaskParameter.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
        if chunkErr != nil {
            log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
            return nil, ErrCoordinatorInternalFailure
        }
        if toChunk != nil {
@@ -179,7 +181,12 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
        return nil, ErrCoordinatorInternalFailure
    }

    bp.batchTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
    bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
    bp.batchTaskGetTaskProver.With(prometheus.Labels{
        coordinatorType.LabelProverName: proverTask.ProverName,
        coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
        coordinatorType.LabelProverVersion: proverTask.ProverVersion,
    }).Inc()

    return taskMsg, nil
}

@@ -29,15 +29,16 @@ type ChunkProverTask struct {

    chunkAttemptsExceedTotal prometheus.Counter
    chunkTaskGetTaskTotal *prometheus.CounterVec
    chunkTaskGetTaskProver *prometheus.CounterVec
}

// NewChunkProverTask new a chunk prover task
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask {
    forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
    log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
    cp := &ChunkProverTask{
        BaseProverTask: BaseProverTask{
            vk: vk,
            vkMap: vkMap,
            db: db,
            cfg: cfg,
            nameForkMap: nameForkMap,
@@ -55,6 +56,7 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
            Name: "coordinator_chunk_get_task_total",
            Help: "Total number of chunk get task.",
        }, []string{"fork_name"}),
        chunkTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "chunk"),
    }
    return cp
}
@@ -66,9 +68,9 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
        return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
    }

    hardForkNumber, err := cp.getHardForkNumberByName(getTaskParameter.HardForkName)
    hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
    if err != nil {
        log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
        log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
        return nil, err
    }

@@ -151,7 +153,12 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
        return nil, ErrCoordinatorInternalFailure
    }

    cp.chunkTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
    cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
    cp.chunkTaskGetTaskProver.With(prometheus.Labels{
        coordinatorType.LabelProverName: proverTask.ProverName,
        coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
        coordinatorType.LabelProverVersion: proverTask.ProverVersion,
    }).Inc()

    return taskMsg, nil
}

@@ -2,8 +2,12 @@ package provertask

import (
    "fmt"
    "sync"

    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/scroll-tech/go-ethereum/log"
    "gorm.io/gorm"

    "scroll-tech/common/version"
@@ -13,11 +17,12 @@ import (
    coordinatorType "scroll-tech/coordinator/internal/types"
)

// ErrCoordinatorInternalFailure coordinator internal db failure
var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")

// ErrHardForkName indicates client request with the wrong hard fork name
var ErrHardForkName = fmt.Errorf("wrong hard fork name")
var (
    // ErrCoordinatorInternalFailure coordinator internal db failure
    ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
    // ErrHardForkName indicates client request with the wrong hard fork name
    ErrHardForkName = fmt.Errorf("wrong hard fork name")
)

// ProverTask the interface of a collector who send data to prover
type ProverTask interface {
@@ -28,8 +33,8 @@ type ProverTask interface {
type BaseProverTask struct {
    cfg *config.Config
    db *gorm.DB
    vk string

    vkMap map[string]string
    nameForkMap map[string]uint64
    forkHeights []uint64

@@ -44,6 +49,7 @@ type proverTaskContext struct {
    PublicKey string
    ProverName string
    ProverVersion string
    HardForkName string
}

// checkParameter check the prover task parameter illegal
@@ -68,12 +74,24 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
    }
    ptc.ProverVersion = proverVersion.(string)

    hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
    if !hardForkNameExist {
        return nil, fmt.Errorf("get hard fork name from context failed")
    }
    ptc.HardForkName = hardForkName.(string)

    if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
        return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
    }

    vk, vkExist := b.vkMap[ptc.HardForkName]
    if !vkExist {
        return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
    }

    // if the prover has a different vk
    if getTaskParameter.VK != b.vk {
    if getTaskParameter.VK != vk {
        log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
        // if the prover reports a different prover version
        if !version.CheckScrollProverVersion(proverVersion.(string)) {
            return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
@@ -115,3 +133,22 @@ func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error

    return hardForkNumber, nil
}
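
The net effect of the checkParameter change: the single vk field becomes a per-fork lookup, so a prover is matched against the verifying key of the fork it authenticated with. A standalone sketch of that check (the map contents are illustrative):

    package main

    import "fmt"

    // checkVK mirrors the lookup above: one verifying key per hard fork,
    // and the prover's reported VK must match the key for its fork.
    func checkVK(vkMap map[string]string, hardForkName, proverVK string) error {
        vk, ok := vkMap[hardForkName]
        if !ok {
            return fmt.Errorf("can't get vk for hard fork:%s", hardForkName)
        }
        if proverVK != vk {
            return fmt.Errorf("vk inconsistency for hard fork:%s", hardForkName)
        }
        return nil
    }

    func main() {
        vkMap := map[string]string{"shanghai": "vk-a", "bernoulli": "vk-b"}
        fmt.Println(checkVK(vkMap, "bernoulli", "vk-b")) // <nil>
        fmt.Println(checkVK(vkMap, "bernoulli", "vk-a")) // vk inconsistency
    }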

var (
    getTaskCounterInitOnce sync.Once
    getTaskCounterVec *prometheus.CounterVec = nil
)

func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
    getTaskCounterInitOnce.Do(func() {
        getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{
            Name: "coordinator_get_task_count",
            Help: "Multi dimensions get task counter.",
        }, []string{"task_type",
            coordinatorType.LabelProverName,
            coordinatorType.LabelProverPublicKey,
            coordinatorType.LabelProverVersion})
    })

    return getTaskCounterVec.MustCurryWith(prometheus.Labels{"task_type": taskType})
}
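
The sync.Once here matters because both the chunk and batch tasks call this helper against the same registry: a second promauto registration of the same metric name panics, so the CounterVec is built once and each caller gets a view with task_type pre-filled via MustCurryWith. A self-contained sketch of the pattern:

    package main

    import (
        "sync"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promauto"
    )

    var (
        once sync.Once
        vec  *prometheus.CounterVec
    )

    // sharedCounter registers the vec exactly once, then curries task_type
    // so each task type increments its own slice of the same metric.
    func sharedCounter(reg prometheus.Registerer, taskType string) *prometheus.CounterVec {
        once.Do(func() {
            vec = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
                Name: "example_get_task_count",
                Help: "Get task counter.",
            }, []string{"task_type", "prover_name"})
        })
        return vec.MustCurryWith(prometheus.Labels{"task_type": taskType})
    }

    func main() {
        reg := prometheus.NewRegistry()
        sharedCounter(reg, "chunk").WithLabelValues("prover-0").Inc()
        sharedCounter(reg, "batch").WithLabelValues("prover-0").Inc()
    }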

@@ -134,6 +134,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
    if len(pv) == 0 {
        return fmt.Errorf("get ProverVersion from context failed")
    }
    hardForkName := ctx.GetString(coordinatorType.HardForkName)

    var proverTask *orm.ProverTask
    var err error
@@ -156,20 +157,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
    proofTimeSec := uint64(proofTime.Seconds())

    log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
        "proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
        "proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)

    if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
    if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
        return err
    }

    m.verifierTotal.WithLabelValues(pv).Inc()

    var success bool
    success := true
    var verifyErr error
    if proofMsg.Type == message.ProofTypeChunk {
        success, verifyErr = m.verifier.VerifyChunkProof(proofMsg.ChunkProof)
    } else if proofMsg.Type == message.ProofTypeBatch {
        success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof)
    // only verify batch proof. chunk proof verifier have been disabled after Bernoulli
    if proofMsg.Type == message.ProofTypeBatch {
        success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName)
    }

    if verifyErr != nil || !success {
@@ -178,7 +178,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
        m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)

        log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
            "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
            "prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)

        if verifyErr != nil {
            return ErrValidatorFailureVerifiedFailed
@@ -189,7 +189,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
    m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())

    log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
        "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
        "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)

    if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
        m.proofSubmitFailure.Inc()
@@ -221,7 +221,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
    return nil
}

func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) {
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) {
    defer func() {
        if err != nil {
            m.validateFailureTotal.Inc()
@@ -240,7 +240,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
            "cannot submit valid proof for a prover task twice",
            "taskType", proverTask.TaskType, "hash", proofMsg.ID,
            "proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
            "proverPublicKey", proverTask.ProverPublicKey,
            "proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName,
        )
        return ErrValidatorFailureProverTaskCannotSubmitTwice
    }
@@ -259,7 +259,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
        log.Info("proof generated by prover failed",
            "taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
            "proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
            "failureMessage", failureMsg)
            "failureMessage", failureMsg, "forkName", forkName)
        return ErrValidatorFailureProofMsgStatusNotOk
    }

@@ -267,13 +267,13 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
    if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
        m.validateFailureProverTaskTimeout.Inc()
        log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
            "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
            "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName)
        return ErrValidatorFailureProofTimeout
    }

    // store the proof to prover task
    if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
        log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
        log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName,
            "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
    }

@@ -281,7 +281,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
    if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
        m.validateFailureProverTaskHaveVerifier.Inc()
        log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
            "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
            "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName)
        return ErrValidatorFailureTaskHaveVerifiedSuccess
    }
    return nil

BIN coordinator/internal/logic/verifier/legacy_vk/agg_vk.vkey (new file; binary file not shown)
BIN coordinator/internal/logic/verifier/legacy_vk/chunk_vk.vkey (new file; binary file not shown)
@@ -9,8 +9,26 @@ import (
)

// NewVerifier Sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
    return &Verifier{}, nil
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
    batchVKMap := map[string]string{
        "shanghai": "",
        "bernoulli": "",
        "london": "",
        "istanbul": "",
        "homestead": "",
        "eip155": "",
    }
    chunkVKMap := map[string]string{
        "shanghai": "",
        "bernoulli": "",
        "london": "",
        "istanbul": "",
        "homestead": "",
        "eip155": "",
    }
    batchVKMap[cfg.ForkName] = ""
    chunkVKMap[cfg.ForkName] = ""
    return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}

// VerifyChunkProof return a mock verification result for a ChunkProof.
@@ -22,7 +40,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
}

// VerifyBatchProof return a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
    if string(proof.Proof) == InvalidTestProof {
        return false, nil
    }

@@ -9,7 +9,7 @@ const InvalidTestProof = "this is a invalid proof"

// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
    cfg *config.VerifierConfig
    BatchVK string
    ChunkVK string
    cfg *config.VerifierConfig
    ChunkVKMap map[string]string
    BatchVKMap map[string]string
}

@@ -11,9 +11,11 @@ package verifier
import "C" //nolint:typecheck

import (
    "embed"
    "encoding/base64"
    "encoding/json"
    "io"
    "io/fs"
    "os"
    "path"
    "unsafe"
@@ -28,7 +30,26 @@ import (
// NewVerifier Sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
    if cfg.MockMode {
        return &Verifier{cfg: cfg}, nil
        batchVKMap := map[string]string{
            "shanghai": "",
            "bernoulli": "",
            "london": "",
            "istanbul": "",
            "homestead": "",
            "eip155": "",
        }
        chunkVKMap := map[string]string{
            "shanghai": "",
            "bernoulli": "",
            "london": "",
            "istanbul": "",
            "homestead": "",
            "eip155": "",
        }

        batchVKMap[cfg.ForkName] = ""
        chunkVKMap[cfg.ForkName] = ""
        return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
    }
    paramsPathStr := C.CString(cfg.ParamsPath)
    assetsPathStr := C.CString(cfg.AssetsPath)
@@ -40,25 +61,31 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
    C.init_batch_verifier(paramsPathStr, assetsPathStr)
    C.init_chunk_verifier(paramsPathStr, assetsPathStr)

    batchVK, err := readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
    v := &Verifier{
        cfg: cfg,
        ChunkVKMap: make(map[string]string),
        BatchVKMap: make(map[string]string),
    }

    batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
    if err != nil {
        return nil, err
    }

    chunkVK, err := readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
    chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
    if err != nil {
        return nil, err
    }
    v.BatchVKMap[cfg.ForkName] = batchVK
    v.ChunkVKMap[cfg.ForkName] = chunkVK

    return &Verifier{
        cfg: cfg,
        BatchVK: batchVK,
        ChunkVK: chunkVK,
    }, nil
    if err := v.loadEmbedVK(); err != nil {
        return nil, err
    }
    return v, nil
}

// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
    if v.cfg.MockMode {
        log.Info("Mock mode, batch verifier disabled")
        if string(proof.Proof) == InvalidTestProof {
@@ -72,13 +99,15 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
        return false, err
    }

    log.Info("Start to verify batch proof", "forkName", forkName)
    proofStr := C.CString(string(buf))
    forkNameStr := C.CString(forkName)
    defer func() {
        C.free(unsafe.Pointer(proofStr))
        C.free(unsafe.Pointer(forkNameStr))
    }()

    log.Info("Start to verify batch proof ...")
    verified := C.verify_batch_proof(proofStr)
    verified := C.verify_batch_proof(proofStr, forkNameStr)
    return verified != 0, nil
}
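
The paired C.CString/C.free calls above are the load-bearing part of the change: C.CString copies into C-owned memory that Go's garbage collector never touches, so every allocation needs an explicit free on all return paths, which the deferred closure guarantees. A runnable sketch of the same ownership pattern with a stand-in C function (verify_batch_proof itself lives in the Rust library):

    package main

    /*
    #include <stdlib.h>
    #include <string.h>

    // stand-in for verify_batch_proof: any C function taking two strings.
    static int check(const char* proof, const char* fork) {
        return strlen(proof) > 0 && strlen(fork) > 0;
    }
    */
    import "C"

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        proofStr := C.CString(`{"proof":"..."}`)
        forkNameStr := C.CString("bernoulli")
        defer func() {
            // C memory is invisible to the Go GC; free both strings on exit.
            C.free(unsafe.Pointer(proofStr))
            C.free(unsafe.Pointer(forkNameStr))
        }()

        ok := C.check(proofStr, forkNameStr)
        fmt.Println("verified:", ok != 0)
    }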

@@ -107,7 +136,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
    return verified != 0, nil
}

func readVK(filePat string) (string, error) {
func (v *Verifier) readVK(filePat string) (string, error) {
    f, err := os.Open(filePat)
    if err != nil {
        return "", err
@@ -118,3 +147,26 @@ func readVK(filePat string) (string, error) {
    }
    return base64.StdEncoding.EncodeToString(byt), nil
}

//go:embed legacy_vk/*
var legacyVKFS embed.FS

func (v *Verifier) loadEmbedVK() error {
    batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
    if err != nil {
        log.Error("load embed batch vk failure", "err", err)
        return err
    }

    chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
    if err != nil {
        log.Error("load embed chunk vk failure", "err", err)
        return err
    }

    v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
    v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
    v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
    v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
    return nil
}
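
loadEmbedVK is why the legacy_vk binaries above were committed: go:embed compiles them into the coordinator binary, so provers on the old "shanghai" keys (and legacy clients that send no fork name at all, hence the "" entries) still resolve a VK even when the on-disk assets only carry the current fork. The minimal shape of the pattern, with a hypothetical asset path:

    package main

    import (
        "embed"
        "encoding/base64"
        "fmt"
        "io/fs"
    )

    // Files under assets/ are baked into the binary at build time.
    // The directory must exist next to this source file (hypothetical path).
    //
    //go:embed assets/*
    var assetFS embed.FS

    func main() {
        raw, err := fs.ReadFile(assetFS, "assets/agg_vk.vkey")
        if err != nil {
            panic(err)
        }
        // Store base64-encoded, matching what readVK does for on-disk keys.
        fmt.Println("embedded vk bytes:", len(raw), "b64 length:", len(base64.StdEncoding.EncodeToString(raw)))
    }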

@@ -14,7 +14,6 @@ import (
    "scroll-tech/common/types/message"

    "scroll-tech/coordinator/internal/config"
    "scroll-tech/coordinator/internal/logic/verifier"
)

var (
@@ -34,7 +33,7 @@ func TestFFI(t *testing.T) {
        AssetsPath: *assetsPath,
    }

    v, err := verifier.NewVerifier(cfg)
    v, err := NewVerifier(cfg)
    as.NoError(err)

    chunkProof1 := readChunkProof(*chunkProofPath1, as)
@@ -50,7 +49,7 @@ func TestFFI(t *testing.T) {
    t.Log("Verified chunk proof 2")

    batchProof := readBatchProof(*batchProofPath, as)
    batchOk, err := v.VerifyBatchProof(batchProof)
    batchOk, err := v.VerifyBatchProof(batchProof, "bernoulli")
    as.NoError(err)
    as.True(batchOk)
    t.Log("Verified batch proof")

@@ -73,22 +73,16 @@ func (*Batch) TableName() string {
// GetUnassignedBatch retrieves unassigned batch based on the specified limit.
// The returned batch are sorted in ascending order by their index.
func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
    db := o.db.WithContext(ctx)
    db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
    db = db.Where("total_attempts < ?", maxTotalAttempts)
    db = db.Where("active_attempts < ?", maxActiveAttempts)
    db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
    db = db.Where("start_chunk_index >= ?", startChunkIndex)
    db = db.Where("end_chunk_index < ?", endChunkIndex)

    var batch Batch
    err := db.First(&batch).Error
    if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
        return nil, nil
    }

    db := o.db.WithContext(ctx)
    sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
        int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
    err := db.Raw(sql).Scan(&batch).Error
    if err != nil {
        return nil, fmt.Errorf("Batch.GetUnassignedBatches error: %w", err)
        return nil, fmt.Errorf("Batch.GetUnassignedBatch error: %w", err)
    }
    if batch.Hash == "" {
        return nil, nil
    }
    return &batch, nil
}
@@ -96,22 +90,16 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChun
// GetAssignedBatch retrieves assigned batch based on the specified limit.
// The returned batch are sorted in ascending order by their index.
func (o *Batch) GetAssignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
    db := o.db.WithContext(ctx)
    db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
    db = db.Where("total_attempts < ?", maxTotalAttempts)
    db = db.Where("active_attempts < ?", maxActiveAttempts)
    db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
    db = db.Where("start_chunk_index >= ?", startChunkIndex)
    db = db.Where("end_chunk_index < ?", endChunkIndex)

    var batch Batch
    err := db.First(&batch).Error
    if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
        return nil, nil
    }

    db := o.db.WithContext(ctx)
    sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
        int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
    err := db.Raw(sql).Scan(&batch).Error
    if err != nil {
        return nil, fmt.Errorf("Batch.GetAssignedBatches error: %w", err)
        return nil, fmt.Errorf("Batch.GetAssignedBatch error: %w", err)
    }
    if batch.Hash == "" {
        return nil, nil
    }
    return &batch, nil
}

@@ -71,22 +71,16 @@ func (*Chunk) TableName() string {
// GetUnassignedChunk retrieves unassigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
    db := o.db.WithContext(ctx)
    db = db.Model(&Chunk{})
    db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
    db = db.Where("total_attempts < ?", maxTotalAttempts)
    db = db.Where("active_attempts < ?", maxActiveAttempts)
    db = db.Where("start_block_number >= ?", fromBlockNum)
    db = db.Where("end_block_number < ?", toBlockNum)

    var chunk Chunk
    err := db.First(&chunk).Error
    if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
        return nil, nil
    }

    db := o.db.WithContext(ctx)
    sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
        int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
    err := db.Raw(sql).Scan(&chunk).Error
    if err != nil {
        return nil, fmt.Errorf("Chunk.GetUnassignedChunks error: %w", err)
        return nil, fmt.Errorf("Chunk.GetUnassignedChunk error: %w", err)
    }
    if chunk.Hash == "" {
        return nil, nil
    }
    return &chunk, nil
}
@@ -94,22 +88,16 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum
// GetAssignedChunk retrieves assigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetAssignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
    db := o.db.WithContext(ctx)
    db = db.Model(&Chunk{})
    db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
    db = db.Where("total_attempts < ?", maxTotalAttempts)
    db = db.Where("active_attempts < ?", maxActiveAttempts)
    db = db.Where("start_block_number >= ?", fromBlockNum)
    db = db.Where("end_block_number < ?", toBlockNum)

    var chunk Chunk
    err := db.First(&chunk).Error
    if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
        return nil, nil
    }

    db := o.db.WithContext(ctx)
    sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
        int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
    err := db.Raw(sql).Scan(&chunk).Error
    if err != nil {
        return nil, fmt.Errorf("Chunk.GetAssignedChunks error: %w", err)
        return nil, fmt.Errorf("Chunk.GetAssignedChunk error: %w", err)
    }
    if chunk.Hash == "" {
        return nil, nil
    }
    return &chunk, nil
}
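
Both ORM rewrites trade gorm's query builder for a hand-written statement, presumably to pin down the exact one-row query shape; since every interpolated value is an integer, the fmt.Sprintf here is injection-safe. gorm's Raw also accepts ? placeholders, so an equivalent bound-parameter version is possible. A hedged sketch (the status constant and row type are stand-ins):

    package orm

    import (
        "context"
        "fmt"

        "gorm.io/gorm"
    )

    type chunkRow struct {
        Hash string
    }

    // getUnassignedChunk is the same one-row fetch with bound parameters
    // instead of string formatting.
    func getUnassignedChunk(ctx context.Context, db *gorm.DB, fromBlockNum, toBlockNum uint64) (*chunkRow, error) {
        var chunk chunkRow
        err := db.WithContext(ctx).Raw(
            "SELECT * FROM chunk WHERE proving_status = ? AND start_block_number >= ? AND end_block_number < ? AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1",
            1, // stand-in for int(types.ProvingTaskUnassigned)
            fromBlockNum, toBlockNum,
        ).Scan(&chunk).Error
        if err != nil {
            return nil, fmt.Errorf("getUnassignedChunk error: %w", err)
        }
        if chunk.Hash == "" {
            return nil, nil // Scan leaves the struct zeroed when no row matches
        }
        return &chunk, nil
    }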

@@ -9,41 +9,36 @@ import (
    "github.com/stretchr/testify/assert"
    "gorm.io/gorm"

    "scroll-tech/common/database"
    "scroll-tech/common/docker"
    "scroll-tech/common/testcontainers"
    "scroll-tech/common/types"
    "scroll-tech/common/types/message"
    "scroll-tech/common/utils"

    "scroll-tech/database/migrate"
)

var (
    base *docker.App

    testApps *testcontainers.TestcontainerApps
    db *gorm.DB
    proverTaskOrm *ProverTask
)

func TestMain(m *testing.M) {
    t := &testing.T{}
    setupEnv(t)
    defer tearDownEnv(t)
    defer func() {
        if testApps != nil {
            testApps.Free()
        }
        tearDownEnv(t)
    }()
    m.Run()
}

func setupEnv(t *testing.T) {
    base = docker.NewDockerApp()
    base.RunDBImage(t)
    testApps = testcontainers.NewTestcontainerApps()
    assert.NoError(t, testApps.StartPostgresContainer())

    var err error
    db, err = database.InitDB(
        &database.Config{
            DSN: base.DBConfig.DSN,
            DriverName: base.DBConfig.DriverName,
            MaxOpenNum: base.DBConfig.MaxOpenNum,
            MaxIdleNum: base.DBConfig.MaxIdleNum,
        },
    )
    db, err = testApps.GetGormDBClient()
    assert.NoError(t, err)
    sqlDB, err := db.DB()
    assert.NoError(t, err)
@@ -56,10 +51,11 @@ func tearDownEnv(t *testing.T) {
    sqlDB, err := db.DB()
    assert.NoError(t, err)
    sqlDB.Close()
    base.Free()
}

func TestProverTaskOrm(t *testing.T) {
    setupEnv(t)

    sqlDB, err := db.DB()
    assert.NoError(t, err)
    assert.NoError(t, migrate.ResetDB(sqlDB))
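
Stripped of the old docker lines, the post-migration test scaffolding reduces to a small pattern; this condensed sketch uses only the wrapper calls that appear in the diff above (NewTestcontainerApps, StartPostgresContainer, GetGormDBClient, Free):

    package orm

    import (
        "testing"

        "github.com/stretchr/testify/assert"
        "gorm.io/gorm"

        "scroll-tech/common/testcontainers"
    )

    var (
        testApps *testcontainers.TestcontainerApps
        db       *gorm.DB
    )

    // One container per test binary, freed in TestMain rather than per test.
    func TestMain(m *testing.M) {
        defer func() {
            if testApps != nil {
                testApps.Free()
            }
        }()
        m.Run()
    }

    func setupEnv(t *testing.T) {
        testApps = testcontainers.NewTestcontainerApps()
        assert.NoError(t, testApps.StartPostgresContainer())

        var err error
        db, err = testApps.GetGormDBClient()
        assert.NoError(t, err)
    }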

@@ -9,6 +9,8 @@ const (
    ProverName = "prover_name"
    // ProverVersion the prover version for context
    ProverVersion = "prover_version"
    // HardForkName the fork name for context
    HardForkName = "hard_fork_name"
)

// Message the login message struct
@@ -16,6 +18,7 @@ type Message struct {
    Challenge string `form:"challenge" json:"challenge" binding:"required"`
    ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
    ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
    HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
}

// LoginParameter for /login api

@@ -2,7 +2,6 @@ package types

// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
    HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
    ProverHeight uint64 `form:"prover_height" json:"prover_height"`
    TaskType int `form:"task_type" json:"task_type"`
    VK string `form:"vk" json:"vk"`

coordinator/internal/types/metric.go (new file, 10 lines)
@@ -0,0 +1,10 @@
package types

var (
    // LabelProverName label name for prover name; common label name using in prometheus metrics, same rule applies to below.
    LabelProverName = "prover_name"
    // LabelProverPublicKey label name for prover public key
    LabelProverPublicKey = "prover_pubkey"
    // LabelProverVersion label name for prover version
    LabelProverVersion = "prover_version"
)
|
||||
@@ -21,8 +21,7 @@ import (

	"scroll-tech/database/migrate"

	"scroll-tech/common/database"
	"scroll-tech/common/docker"
	"scroll-tech/common/testcontainers"
	"scroll-tech/common/types"
	"scroll-tech/common/types/encoding"
	"scroll-tech/common/types/message"
@@ -43,10 +42,9 @@ const (
)

var (
	dbCfg *database.Config
	conf *config.Config
	conf *config.Config

	base *docker.App
	testApps *testcontainers.TestcontainerApps

	db *gorm.DB
	l2BlockOrm *orm.L2Block
@@ -70,13 +68,12 @@ var (
)

func TestMain(m *testing.M) {
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	base = docker.NewDockerApp()
	defer func() {
		if testApps != nil {
			testApps.Free()
		}
	}()
	m.Run()
	base.Free()
}

func randomURL() string {
@@ -86,7 +83,8 @@ func randomURL() string {

func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, nameForkMap map[string]int64) (*cron.Collector, *http.Server) {
	var err error
	db, err = database.InitDB(dbCfg)
	db, err = testApps.GetGormDBClient()

	assert.NoError(t, err)
	sqlDB, err := db.DB()
	assert.NoError(t, err)
@@ -98,8 +96,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
			ChainID: 111,
		},
		ProverManager: &config.ProverManager{
			ProversPerSession: proversPerSession,
			Verifier: &config.VerifierConfig{MockMode: true},
			ProversPerSession: proversPerSession,
			Verifier: &config.VerifierConfig{
				MockMode: true,
			},
			BatchCollectionTimeSec: 10,
			ChunkCollectionTimeSec: 10,
			MaxVerifierWorkers: 10,
@@ -115,6 +115,8 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
	var chainConf params.ChainConfig
	for forkName, forkNumber := range nameForkMap {
		switch forkName {
		case "shanghai":
			chainConf.ShanghaiBlock = big.NewInt(forkNumber)
		case "bernoulli":
			chainConf.BernoulliBlock = big.NewInt(forkNumber)
		case "london":
@@ -149,20 +151,18 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
}

func setEnv(t *testing.T) {
	var err error

	version.Version = "v4.1.98"

	base = docker.NewDockerApp()
	base.RunDBImage(t)
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	dbCfg = &database.Config{
		DSN: base.DBConfig.DSN,
		DriverName: base.DBConfig.DriverName,
		MaxOpenNum: base.DBConfig.MaxOpenNum,
		MaxIdleNum: base.DBConfig.MaxIdleNum,
	}
	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())

	var err error
	db, err = database.InitDB(dbCfg)
	db, err = testApps.GetGormDBClient()
	assert.NoError(t, err)
	sqlDB, err := db.DB()
	assert.NoError(t, err)
@@ -199,7 +199,6 @@ func setEnv(t *testing.T) {

func TestApis(t *testing.T) {
	// Set up the test environment.
	base = docker.NewDockerApp()
	setEnv(t)

	t.Run("TestHandshake", testHandshake)
@@ -211,11 +210,6 @@ func TestApis(t *testing.T) {
	t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
	t.Run("TestTimeoutProof", testTimeoutProof)
	t.Run("TestHardFork", testHardForkAssignTask)

	// Teardown
	t.Cleanup(func() {
		base.Free()
	})
}

func testHandshake(t *testing.T) {
@@ -268,12 +262,12 @@ func testGetTaskBlocked(t *testing.T) {
	assert.NoError(t, err)

	expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
	code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
	code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
	assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

	expectedErr = fmt.Errorf("get empty prover task")
	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
	assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

@@ -284,12 +278,12 @@ func testGetTaskBlocked(t *testing.T) {
	assert.NoError(t, err)

	expectedErr = fmt.Errorf("get empty prover task")
	code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
	code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
	assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

	expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
	assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}
@@ -309,12 +303,12 @@ func testOutdatedProverVersion(t *testing.T) {
	assert.True(t, chunkProver.healthCheckSuccess(t))

	expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion)
	code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
	code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
	assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

	expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion)
	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
	code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
	assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
	assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}
@@ -368,7 +362,7 @@ func testHardForkAssignTask(t *testing.T) {
		{
			name: "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0",
			proofType: message.ProofTypeBatch,
			forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
			forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
			exceptTaskNumber: 0,
			proverForkNames: []string{"", ""},
			exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
@@ -458,7 +452,7 @@ func testHardForkAssignTask(t *testing.T) {
		{ // hard fork 3, prover1:2 prover2:3 block [2-3]
			name: "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0",
			proofType: message.ProofTypeChunk,
			forkNumbers: map[string]int64{"london": forkNumberThree},
			forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
			exceptTaskNumber: 2,
			proverForkNames: []string{"", "london"},
			exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -467,7 +461,7 @@ func testHardForkAssignTask(t *testing.T) {
		{
			name: "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0",
			proofType: message.ProofTypeBatch,
			forkNumbers: map[string]int64{"london": forkNumberThree},
			forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
			exceptTaskNumber: 2,
			proverForkNames: []string{"", "london"},
			exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -476,7 +470,7 @@ func testHardForkAssignTask(t *testing.T) {
		{ // hard fork 2, prover 2 block [2-3]
			name: "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0",
			proofType: message.ProofTypeChunk,
			forkNumbers: map[string]int64{"london": forkNumberThree},
			forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
			exceptTaskNumber: 1,
			proverForkNames: []string{"", ""},
			exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
@@ -544,7 +538,7 @@ func testHardForkAssignTask(t *testing.T) {
				continue
			}
			getTaskNumber++
			mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success)
			mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i])
		}
		assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
	})
@@ -587,7 +581,7 @@ func testValidProof(t *testing.T) {
		assert.Equal(t, errCode, types.Success)
		assert.Equal(t, errMsg, "")
		assert.NotNil(t, proverTask)
		provers[i].submitProof(t, proverTask, proofStatus, types.Success)
		provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul")
	}

	// verify proof status
@@ -653,34 +647,21 @@ func testInvalidProof(t *testing.T) {
	err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
	assert.NoError(t, err)

	// create mock provers.
	provers := make([]*mockProver, 2)
	for i := 0; i < len(provers); i++ {
		var proofType message.ProofType
		if i%2 == 0 {
			proofType = message.ProofTypeChunk
		} else {
			proofType = message.ProofTypeBatch
		}
		provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
		proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
		assert.NotNil(t, proverTask)
		assert.Equal(t, errCode, types.Success)
		assert.Equal(t, errMsg, "")
		provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
	}
	proofType := message.ProofTypeBatch
	provingStatus := verifiedFailed
	expectErrCode := types.ErrCoordinatorHandleZkProofFailure
	prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version)
	proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul")
	assert.NotNil(t, proverTask)
	assert.Equal(t, errCode, types.Success)
	assert.Equal(t, errMsg, "")
	prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul")

	// verify proof status
	var (
		tick = time.Tick(1500 * time.Millisecond)
		tickStop = time.Tick(time.Minute)
	)

	var (
		chunkProofStatus types.ProvingStatus
		tick = time.Tick(1500 * time.Millisecond)
		tickStop = time.Tick(time.Minute)
		batchProofStatus types.ProvingStatus
		chunkActiveAttempts int16
		chunkMaxAttempts int16
		batchActiveAttempts int16
		batchMaxAttempts int16
	)
@@ -688,24 +669,17 @@ func testInvalidProof(t *testing.T) {
	for {
		select {
		case <-tick:
			chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
			assert.NoError(t, err)
			batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
			assert.NoError(t, err)
			if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
			if batchProofStatus == types.ProvingTaskAssigned {
				return
			}
			chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
			assert.NoError(t, err)
			assert.Equal(t, 1, int(chunkMaxAttempts))
			assert.Equal(t, 0, int(chunkActiveAttempts))

			batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
			assert.NoError(t, err)
			assert.Equal(t, 1, int(batchMaxAttempts))
			assert.Equal(t, 0, int(batchActiveAttempts))
		case <-tickStop:
			t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
			t.Error("failed to check proof status", "batchProofStatus", batchProofStatus.String())
			return
		}
	}
@@ -745,7 +719,7 @@ func testProofGeneratedFailed(t *testing.T) {
		assert.NotNil(t, proverTask)
		assert.Equal(t, errCode, types.Success)
		assert.Equal(t, errMsg, "")
		provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure)
		provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul")
	}

	// verify proof status
@@ -868,14 +842,14 @@ func testTimeoutProof(t *testing.T) {
	assert.NotNil(t, proverChunkTask2)
	assert.Equal(t, chunkTask2ErrCode, types.Success)
	assert.Equal(t, chunkTask2ErrMsg, "")
	chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success)
	chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul")

	batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version)
	proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul")
	assert.NotNil(t, proverBatchTask2)
	assert.Equal(t, batchTask2ErrCode, types.Success)
	assert.Equal(t, batchTask2ErrMsg, "")
	batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success)
	batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul")

	// verify proof status, it should be verified now, because second prover sent valid proof
	chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)

@@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
}

// connectToCoordinator sets up a websocket client to connect to the prover manager.
func (r *mockProver) connectToCoordinator(t *testing.T) string {
func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string {
	challengeString := r.challenge(t)
	return r.login(t, challengeString)
	return r.login(t, challengeString, forkName)
}

func (r *mockProver) challenge(t *testing.T) string {
@@ -76,18 +76,32 @@ func (r *mockProver) challenge(t *testing.T) string {
	return loginData.Token
}

func (r *mockProver) login(t *testing.T, challengeString string) string {
	authMsg := message.AuthMsg{
		Identity: &message.Identity{
			Challenge: challengeString,
			ProverName: r.proverName,
			ProverVersion: r.proverVersion,
		},
func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string {
	var body string
	if forkName != "" {
		authMsg := message.AuthMsg{
			Identity: &message.Identity{
				Challenge: challengeString,
				ProverName: r.proverName,
				ProverVersion: r.proverVersion,
				HardForkName: forkName,
			},
		}
		assert.NoError(t, authMsg.SignWithKey(r.privKey))
		body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
			authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
	} else {
		authMsg := message.LegacyAuthMsg{
			Identity: &message.LegacyIdentity{
				Challenge: challengeString,
				ProverName: r.proverName,
				ProverVersion: r.proverVersion,
			},
		}
		assert.NoError(t, authMsg.SignWithKey(r.privKey))
		body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
			authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
	}
	assert.NoError(t, authMsg.SignWithKey(r.privKey))

	body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
		authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)

	var result ctypes.Response
	client := resty.New()
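
The mock prover hand-assembles the login JSON with fmt.Sprintf so the legacy payload (no hard_fork_name) and the new payload stay byte-for-byte predictable. A minimal sketch of the same branching with encoding/json instead — the struct names here are illustrative assumptions, with omitempty reproducing the legacy shape when no fork name is set:

package main

import (
	"encoding/json"
	"fmt"
)

// loginMessage mirrors the wire shape used above; with omitempty, an empty
// HardForkName is dropped from the JSON, matching the legacy payload.
type loginMessage struct {
	Challenge     string `json:"challenge"`
	ProverName    string `json:"prover_name"`
	ProverVersion string `json:"prover_version"`
	HardForkName  string `json:"hard_fork_name,omitempty"`
}

type loginBody struct {
	Message   loginMessage `json:"message"`
	Signature string       `json:"signature"`
}

func main() {
	body, _ := json.Marshal(loginBody{
		Message: loginMessage{
			Challenge:     "challenge-token",
			ProverName:    "prover_test0",
			ProverVersion: "v4.1.98",
			// HardForkName left empty: serializes like a legacy login.
		},
		Signature: "0xsignature",
	})
	fmt.Println(string(body))
}
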
@@ -137,7 +151,7 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {

func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) {
	// get task from coordinator
	token := r.connectToCoordinator(t)
	token := r.connectToCoordinator(t, forkName)
	assert.NotEmpty(t, token)

	type response struct {
@@ -151,7 +165,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
	resp, err := client.R().
		SetHeader("Content-Type", "application/json").
		SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
		SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "hard_fork_name": forkName}).
		SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
		SetResult(&result).
		Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
	assert.NoError(t, err)
@@ -160,9 +174,11 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
}

// Testing expected errors returned by coordinator.
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
//
//nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) {
	// get task from coordinator
	token := r.connectToCoordinator(t)
	token := r.connectToCoordinator(t, forkName)
	assert.NotEmpty(t, token)

	type response struct {
@@ -185,7 +201,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
	return result.ErrCode, result.ErrMsg
}

func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) {
	proofMsgStatus := message.StatusOk
	if proofStatus == generatedFailed {
		proofMsgStatus = message.StatusProofError
@@ -228,7 +244,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
		submitProof.Proof = string(encodeData)
	}

	token := r.connectToCoordinator(t)
	token := r.connectToCoordinator(t, forkName)
	assert.NotEmpty(t, token)

	submitProofData, err := json.Marshal(submitProof)

@@ -1,93 +1,89 @@
package migrate

import (
	"database/sql"
	"testing"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker"

	"scroll-tech/database"
	"scroll-tech/common/testcontainers"
)

var (
	base *docker.App
	pgDB *sqlx.DB
	testApps *testcontainers.TestcontainerApps
	pgDB *sql.DB
)

func initEnv(t *testing.T) error {
func setupEnv(t *testing.T) {
	// Start db container.
	base.RunDBImage(t)
	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())
	gormClient, err := testApps.GetGormDBClient()
	assert.NoError(t, err)
	pgDB, err = gormClient.DB()
	assert.NoError(t, err)
}

	// Create db orm handler.
	factory, err := database.NewOrmFactory(base.DBConfig)
	if err != nil {
		return err
	}
	pgDB = factory.GetDB()
	return nil
func TestMain(m *testing.M) {
	defer func() {
		if testApps != nil {
			testApps.Free()
		}
	}()
	m.Run()
}

func TestMigrate(t *testing.T) {
	base = docker.NewDockerApp()
	if err := initEnv(t); err != nil {
		t.Fatal(err)
	}

	setupEnv(t)
	t.Run("testCurrent", testCurrent)
	t.Run("testStatus", testStatus)
	t.Run("testResetDB", testResetDB)
	t.Run("testMigrate", testMigrate)
	t.Run("testRollback", testRollback)

	t.Cleanup(func() {
		base.Free()
	})
}

func testCurrent(t *testing.T) {
	cur, err := Current(pgDB.DB)
	cur, err := Current(pgDB)
	assert.NoError(t, err)
	assert.Equal(t, int64(0), cur)
}

func testStatus(t *testing.T) {
	status := Status(pgDB.DB)
	status := Status(pgDB)
	assert.NoError(t, status)
}

func testResetDB(t *testing.T) {
	assert.NoError(t, ResetDB(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, ResetDB(pgDB))
	cur, err := Current(pgDB)
	assert.NoError(t, err)
	// total number of tables.
	assert.Equal(t, int64(16), cur)
}

func testMigrate(t *testing.T) {
	assert.NoError(t, Migrate(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, Migrate(pgDB))
	cur, err := Current(pgDB)
	assert.NoError(t, err)
	assert.Equal(t, int64(16), cur)
}

func testRollback(t *testing.T) {
	version, err := Current(pgDB.DB)
	version, err := Current(pgDB)
	assert.NoError(t, err)
	assert.Equal(t, int64(16), version)

	assert.NoError(t, Rollback(pgDB.DB, nil))
	assert.NoError(t, Rollback(pgDB, nil))

	cur, err := Current(pgDB.DB)
	cur, err := Current(pgDB)
	assert.NoError(t, err)
	assert.Equal(t, version, cur+1)

	targetVersion := int64(0)
	assert.NoError(t, Rollback(pgDB.DB, &targetVersion))
	assert.NoError(t, Rollback(pgDB, &targetVersion))

	cur, err = Current(pgDB.DB)
	cur, err = Current(pgDB)
	assert.NoError(t, err)
	assert.Equal(t, int64(0), cur)
}

@@ -1559,6 +1559,9 @@ github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZy
github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM=
github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=

@@ -21,14 +21,15 @@ import (
type CoordinatorClient struct {
	client *resty.Client

	proverName string
	priv *ecdsa.PrivateKey
	proverName string
	hardForkName string
	priv *ecdsa.PrivateKey

	mu sync.Mutex
}

// NewCoordinatorClient constructs a new CoordinatorClient.
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, hardForkName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
	client := resty.New().
		SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second).
		SetRetryCount(cfg.RetryCount).
@@ -50,9 +51,10 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
		"retry wait time (second)", cfg.RetryWaitTimeSec)

	return &CoordinatorClient{
		client: client,
		proverName: proverName,
		priv: priv,
		client: client,
		proverName: proverName,
		hardForkName: hardForkName,
		priv: priv,
	}, nil
}

@@ -83,6 +85,7 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
			ProverVersion: version.Version,
			ProverName: c.proverName,
			Challenge: challengeResult.Data.Token,
			HardForkName: c.hardForkName,
		},
	}

@@ -97,10 +100,12 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
			Challenge string `json:"challenge"`
			ProverName string `json:"prover_name"`
			ProverVersion string `json:"prover_version"`
			HardForkName string `json:"hard_fork_name"`
		}{
			Challenge: authMsg.Identity.Challenge,
			ProverName: authMsg.Identity.ProverName,
			ProverVersion: authMsg.Identity.ProverVersion,
			HardForkName: authMsg.Identity.HardForkName,
		},
		Signature: authMsg.Signature,
	}
@@ -25,6 +25,7 @@ type LoginRequest struct {
		Challenge string `json:"challenge"`
		ProverName string `json:"prover_name"`
		ProverVersion string `json:"prover_version"`
		HardForkName string `json:"hard_fork_name"`
	} `json:"message"`
	Signature string `json:"signature"`
}
@@ -41,7 +42,6 @@ type LoginResponse struct {

// GetTaskRequest defines the request structure for GetTask API
type GetTaskRequest struct {
	HardForkName string `json:"hard_fork_name"`
	TaskType message.ProofType `json:"task_type"`
	ProverHeight uint64 `json:"prover_height,omitempty"`
	VK string `json:"vk"`
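
With HardForkName dropped from GetTaskRequest, the fork context is fixed once at login while the verifying key still rides along on every task request (the prover code later in this change set notes the VK may not be available yet when logging in). A minimal sketch of building the trimmed request body, using illustrative values and a plain int standing in for message.ProofType:

package main

import (
	"encoding/json"
	"fmt"
)

// getTaskRequest mirrors the trimmed GetTaskRequest above.
type getTaskRequest struct {
	TaskType     int    `json:"task_type"`
	ProverHeight uint64 `json:"prover_height,omitempty"`
	VK           string `json:"vk"`
}

func main() {
	// hard_fork_name is no longer part of this payload; it was sent at login.
	body, _ := json.Marshal(getTaskRequest{
		TaskType:     1, // e.g. a chunk proof; illustrative value
		ProverHeight: 100,
		VK:           "base64-encoded-vk",
	})
	fmt.Println(string(body))
}
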
@@ -12,7 +12,7 @@ import (
	"scroll-tech/prover/config"

	"scroll-tech/common/cmd"
	"scroll-tech/common/docker"
	"scroll-tech/common/testcontainers"
	"scroll-tech/common/types/message"
	"scroll-tech/common/utils"
)
@@ -30,7 +30,7 @@ func getIndex() int {
type ProverApp struct {
	Config *config.Config

	base *docker.App
	testApps *testcontainers.TestcontainerApps

	originFile string
	proverFile string
@@ -39,11 +39,11 @@ type ProverApp struct {
	index int
	name string
	args []string
	docker.AppAPI
	*cmd.Cmd
}

// NewProverApp return a new proverApp manager.
func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
func NewProverApp(testApps *testcontainers.TestcontainerApps, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
	var proofType message.ProofType
	switch mockName {
	case utils.ChunkProverApp:
@@ -54,17 +54,17 @@ func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, htt
		return nil
	}
	name := string(mockName)
	proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", base.Timestamp, name)
	proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", testApps.Timestamp, name)
	proverApp := &ProverApp{
		base: base,
		testApps: testApps,
		originFile: file,
		proverFile: proverFile,
		bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", base.Timestamp, name),
		bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", testApps.Timestamp, name),
		index: getIndex(),
		name: name,
		args: []string{"--log.debug", "--config", proverFile},
	}
	proverApp.AppAPI = cmd.NewCmd(proverApp.name, proverApp.args...)
	proverApp.Cmd = cmd.NewCmd(proverApp.name, proverApp.args...)
	if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
		panic(err)
	}
@@ -73,13 +73,13 @@ func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, htt

// RunApp run prover-test child process by multi parameters.
func (r *ProverApp) RunApp(t *testing.T) {
	r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, "prover start successfully") })
	r.Cmd.RunApp(func() bool { return r.Cmd.WaitResult(t, time.Second*40, "prover start successfully") })
}

// Free stop and release prover-test.
func (r *ProverApp) Free() {
	if !utils.IsNil(r.AppAPI) {
		r.AppAPI.WaitExit()
	if !utils.IsNil(r.Cmd) {
		r.Cmd.WaitExit()
	}
	_ = os.Remove(r.proverFile)
	_ = os.Remove(r.Config.KeystorePath)
@@ -93,8 +93,13 @@ func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.Pro
		return err
	}
	cfg.ProverName = fmt.Sprintf("%s_%d", r.name, r.index)
	cfg.KeystorePath = fmt.Sprintf("/tmp/%d_%s.json", r.base.Timestamp, cfg.ProverName)
	cfg.L2Geth.Endpoint = r.base.L2gethImg.Endpoint()
	cfg.KeystorePath = fmt.Sprintf("/tmp/%d_%s.json", r.testApps.Timestamp, cfg.ProverName)

	endpoint, err := r.testApps.GetL2GethEndPoint()
	if err != nil {
		return err
	}
	cfg.L2Geth.Endpoint = endpoint
	cfg.L2Geth.Confirmations = rpc.LatestBlockNumber
	// Reuse l1geth's keystore file
	cfg.KeystorePassword = "scrolltest"

@@ -82,7 +82,7 @@ func NewProver(ctx context.Context, cfg *config.Config) (*Prover, error) {
	}
	log.Info("init prover_core successfully!")

	coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, priv)
	coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, cfg.HardForkName, priv)
	if err != nil {
		return nil, err
	}
@@ -178,8 +178,7 @@ func (r *Prover) proveAndSubmit() error {
func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
	// prepare the request
	req := &client.GetTaskRequest{
		HardForkName: r.cfg.HardForkName,
		TaskType: r.Type(),
		TaskType: r.Type(),
		// we may not be able to get the vk at the first time, so we should pass vk to the coordinator every time we getTask
		// instead of passing vk when we login
		VK: r.proverCore.VK,

@@ -7,22 +7,19 @@ import (
	"testing"
	"time"

	"scroll-tech/rollup/internal/config"

	"scroll-tech/common/cmd"
	"scroll-tech/common/docker"
	"scroll-tech/common/testcontainers"
	"scroll-tech/common/utils"

	"scroll-tech/rollup/internal/config"
)

// MockApp mockApp-test client manager.
type MockApp struct {
	Config *config.Config
	// TODO field willl be replaced by testApps
	base *docker.App
	Config *config.Config
	testApps *testcontainers.TestcontainerApps

	mockApps map[utils.MockAppName]docker.AppAPI
	mockApps map[utils.MockAppName]*cmd.Cmd

	originFile string
	rollupFile string
@@ -30,12 +27,12 @@ type MockApp struct {
	args []string
}

// NewRollupApp TODO function will be replaced by NewRollupApp2
func NewRollupApp(base *docker.App, file string) *MockApp {
	rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", base.Timestamp)
// NewRollupApp return a new rollupApp manager.
func NewRollupApp(testApps *testcontainers.TestcontainerApps, file string) *MockApp {
	rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", testApps.Timestamp)
	rollupApp := &MockApp{
		base: base,
		mockApps: make(map[utils.MockAppName]docker.AppAPI),
		testApps: testApps,
		mockApps: make(map[utils.MockAppName]*cmd.Cmd),
		originFile: file,
		rollupFile: rollupFile,
		args: []string{"--log.debug", "--config", rollupFile},
@@ -46,22 +43,6 @@ func NewRollupApp(base *docker.App, file string) *MockApp {
	return rollupApp
}

// NewRollupApp2 return a new rollupApp manager, name mush be one them.
func NewRollupApp2(testApps *testcontainers.TestcontainerApps, file string) *MockApp {
	rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", testApps.Timestamp)
	rollupApp := &MockApp{
		testApps: testApps,
		mockApps: make(map[utils.MockAppName]docker.AppAPI),
		originFile: file,
		rollupFile: rollupFile,
		args: []string{"--log.debug", "--config", rollupFile},
	}
	if err := rollupApp.MockConfig2(true); err != nil {
		panic(err)
	}
	return rollupApp
}

// RunApp run rollup-test child process by multi parameters.
func (b *MockApp) RunApp(t *testing.T, name utils.MockAppName, args ...string) {
	if !(name == utils.EventWatcherApp ||
@@ -87,7 +68,7 @@ func (b *MockApp) WaitExit() {
	for _, app := range b.mockApps {
		app.WaitExit()
	}
	b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
	b.mockApps = make(map[utils.MockAppName]*cmd.Cmd)
}

// Free stop and release rollup mocked apps.
@@ -96,35 +77,8 @@ func (b *MockApp) Free() {
	_ = os.Remove(b.rollupFile)
}

// MockConfig TODO function will be replaced by MockConfig2
// MockConfig creates a new rollup config.
func (b *MockApp) MockConfig(store bool) error {
	base := b.base
	// Load origin rollup config file.
	cfg, err := config.NewConfig(b.originFile)
	if err != nil {
		return err
	}

	cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
	cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
	cfg.DBConfig.DSN = base.DBImg.Endpoint()
	b.Config = cfg

	if !store {
		return nil
	}
	// Store changed rollup config into a temp file.
	data, err := json.Marshal(b.Config)
	if err != nil {
		return err
	}
	return os.WriteFile(b.rollupFile, data, 0600)
}

// MockConfig2 creates a new rollup config.
func (b *MockApp) MockConfig2(store bool) error {
	// Load origin rollup config file.
	cfg, err := config.NewConfig(b.originFile)
	if err != nil {

@@ -585,6 +585,24 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
		log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err)
		return err
	}

	// Update the proving status when finalizing without proof, so the coordinator can skip this task.
	// It isn't a necessary step, so don't put it in the same transaction as UpdateFinalizeTxHashAndRollupStatus.
	if !withProof {
		txErr := r.db.Transaction(func(tx *gorm.DB) error {
			if updateErr := r.batchOrm.UpdateProvingStatus(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
				return updateErr
			}
			if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
				return updateErr
			}
			return nil
		})
		if txErr != nil {
			log.Error("Updating chunk and batch proving status when finalizing without proof failure", "batchHash", dbBatch.Hash, "err", txErr)
		}
	}

	r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
	return nil
}

@@ -7,6 +7,7 @@ import (
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/agiledragon/gomonkey/v2"
	"github.com/gin-gonic/gin"
@@ -181,9 +182,9 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
	assert.NoError(t, err)
	chunkOrm := orm.NewChunk(db)
	_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
	chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
	assert.NoError(t, err)
	_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
	chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
	assert.NoError(t, err)

	batch := &encoding.Batch{
@@ -200,11 +201,30 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
	err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
	assert.NoError(t, err)

	err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
	assert.NoError(t, err)

	// Check the database for the updated status using TryTimes.
	ok := utils.TryTimes(5, func() bool {
		relayer.ProcessCommittedBatches()
		statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
		return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
		time.Sleep(time.Second)

		batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
		if batchErr != nil {
			return false
		}
		chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
		if chunkErr != nil {
			return false
		}

		batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing &&
			types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified

		chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
			types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified

		return batchStatus && chunkStatus
	})
	assert.True(t, ok)
	relayer.StopSenders()
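
utils.TryTimes polls here until the batch reports RollupFinalizing and both chunks report ProvingTaskVerified. A generic sketch of such a bounded-retry helper — an illustrative stand-in, since the real helper's signature and pacing may differ:

package main

import (
	"fmt"
	"time"
)

// tryTimes retries fn up to n times with a one-second pause between
// attempts, returning true on the first success.
func tryTimes(n int, fn func() bool) bool {
	for i := 0; i < n; i++ {
		if fn() {
			return true
		}
		time.Sleep(time.Second)
	}
	return false
}

func main() {
	attempts := 0
	ok := tryTimes(5, func() bool {
		attempts++
		return attempts == 3 // pretend the DB converges on the third poll
	})
	fmt.Println(ok, attempts)
}
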
@@ -14,8 +14,8 @@ import (
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/database"
	"scroll-tech/common/docker"
	dockercompose "scroll-tech/common/docker-compose/l1"
	"scroll-tech/common/testcontainers"
	"scroll-tech/common/types/encoding"
	"scroll-tech/common/types/encoding/codecv0"

@@ -26,7 +26,7 @@ var (
	// config
	cfg *config.Config

	base *docker.App
	testApps *testcontainers.TestcontainerApps
	posL1TestEnv *dockercompose.PoSL1TestEnv

	// l2geth client
@@ -53,16 +53,25 @@ func setupEnv(t *testing.T) {
	cfg, err = config.NewConfig("../../../conf/config.json")
	assert.NoError(t, err)

	base.RunL2Geth(t)
	base.RunDBImage(t)
	posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
	assert.NoError(t, err, "failed to create PoS L1 test environment")
	assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")

	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())
	assert.NoError(t, testApps.StartL2GethContainer())

	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
	assert.NoError(t, err)

	dsn, err := testApps.GetDBEndPoint()
	assert.NoError(t, err)
	cfg.DBConfig = &database.Config{
		DSN: base.DBConfig.DSN,
		DriverName: base.DBConfig.DriverName,
		MaxOpenNum: base.DBConfig.MaxOpenNum,
		MaxIdleNum: base.DBConfig.MaxIdleNum,
		DSN: dsn,
		DriverName: "postgres",
		MaxOpenNum: 200,
		MaxIdleNum: 20,
	}
	port, err := rand.Int(rand.Reader, big.NewInt(10000))
	assert.NoError(t, err)
@@ -70,7 +79,7 @@ func setupEnv(t *testing.T) {
	cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort

	// Create l2geth client.
	l2Cli, err = base.L2Client()
	l2Cli, err = testApps.GetL2GethClient()
	assert.NoError(t, err)

	templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
@@ -97,19 +106,14 @@ func setupEnv(t *testing.T) {
}

func TestMain(m *testing.M) {
	base = docker.NewDockerApp()
	base.Free()

	var err error
	posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
	if err != nil {
		log.Crit("failed to create PoS L1 test environment", "err", err)
	}
	if err := posL1TestEnv.Start(); err != nil {
		log.Crit("failed to start PoS L1 test environment", "err", err)
	}
	defer posL1TestEnv.Stop()

	defer func() {
		if testApps != nil {
			testApps.Free()
		}
		if posL1TestEnv != nil {
			posL1TestEnv.Stop()
		}
	}()
	m.Run()
}

@@ -26,12 +26,10 @@ import (
	"github.com/stretchr/testify/assert"
	"gorm.io/gorm"

	"scroll-tech/database/migrate"

	"scroll-tech/common/database"
	"scroll-tech/common/docker"
	dockercompose "scroll-tech/common/docker-compose/l1"
	"scroll-tech/common/testcontainers"
	"scroll-tech/common/types"
	"scroll-tech/database/migrate"

	bridgeAbi "scroll-tech/rollup/abi"
	"scroll-tech/rollup/internal/config"
@@ -39,12 +37,10 @@ import (
	"scroll-tech/rollup/mock_bridge"
)

const TXBatch = 50

var (
	privateKey *ecdsa.PrivateKey
	cfg *config.Config
	base *docker.App
	testApps *testcontainers.TestcontainerApps
	posL1TestEnv *dockercompose.PoSL1TestEnv
	txTypes = []string{"LegacyTx", "DynamicFeeTx", "DynamicFeeTx"}
	txBlob = []*kzg4844.Blob{nil, nil, randBlob()}
@@ -54,19 +50,14 @@ var (
)

func TestMain(m *testing.M) {
	base = docker.NewDockerApp()
	defer base.Free()

	var err error
	posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
	if err != nil {
		log.Crit("failed to create PoS L1 test environment", "err", err)
	}
	if err := posL1TestEnv.Start(); err != nil {
		log.Crit("failed to start PoS L1 test environment", "err", err)
	}
	defer posL1TestEnv.Stop()

	defer func() {
		if testApps != nil {
			testApps.Free()
		}
		if posL1TestEnv != nil {
			posL1TestEnv.Stop()
		}
	}()
	m.Run()
}

@@ -82,17 +73,18 @@ func setupEnv(t *testing.T) {
	assert.NoError(t, err)
	privateKey = priv

	posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
	assert.NoError(t, err, "failed to create PoS L1 test environment")
	assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")

	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())
	assert.NoError(t, testApps.StartL1GethContainer())
	assert.NoError(t, testApps.StartL2GethContainer())

	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()

	base.RunDBImage(t)
	db, err = database.InitDB(
		&database.Config{
			DSN: base.DBConfig.DSN,
			DriverName: base.DBConfig.DriverName,
			MaxOpenNum: base.DBConfig.MaxOpenNum,
			MaxIdleNum: base.DBConfig.MaxIdleNum,
		},
	)
	db, err = testApps.GetGormDBClient()
	assert.NoError(t, err)
	sqlDB, err := db.DB()
	assert.NoError(t, err)

@@ -27,7 +27,7 @@ import (

func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
	db := setupDB(t)
	client, err := ethclient.Dial(base.L1gethImg.Endpoint())
	client, err := testApps.GetL1GethClient()
	assert.NoError(t, err)
	l1Cfg := cfg.L1Config
	watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)

@@ -11,9 +11,8 @@ import (
	"gorm.io/gorm"

	"scroll-tech/common/database"
	"scroll-tech/common/docker"
	"scroll-tech/common/testcontainers"
	"scroll-tech/common/types/encoding"

	"scroll-tech/database/migrate"

	"scroll-tech/rollup/internal/config"
@@ -23,7 +22,7 @@ var (
	// config
	cfg *config.Config

	base *docker.App
	testApps *testcontainers.TestcontainerApps

	// l2geth client
	l2Cli *ethclient.Client
@@ -42,19 +41,27 @@ func setupEnv(t *testing.T) (err error) {
	cfg, err = config.NewConfig("../../../conf/config.json")
	assert.NoError(t, err)

	base.RunImages(t)
	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())
	assert.NoError(t, testApps.StartL1GethContainer())
	assert.NoError(t, testApps.StartL2GethContainer())

	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL1GethEndPoint()
	assert.NoError(t, err)
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
	assert.NoError(t, err)

	dsn, err := testApps.GetDBEndPoint()
	assert.NoError(t, err)
	cfg.DBConfig = &database.Config{
		DSN: base.DBConfig.DSN,
		DriverName: base.DBConfig.DriverName,
		MaxOpenNum: base.DBConfig.MaxOpenNum,
		MaxIdleNum: base.DBConfig.MaxIdleNum,
		DSN: dsn,
		DriverName: "postgres",
		MaxOpenNum: 200,
		MaxIdleNum: 20,
	}

	// Create l2geth client.
	l2Cli, err = base.L2Client()
	l2Cli, err = testApps.GetL2GethClient()
	assert.NoError(t, err)

	block1 = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
@@ -73,11 +80,12 @@ func setupDB(t *testing.T) *gorm.DB {
}

func TestMain(m *testing.M) {
	base = docker.NewDockerApp()

	defer func() {
		if testApps != nil {
			testApps.Free()
		}
	}()
	m.Run()

	base.Free()
}

func TestFunction(t *testing.T) {

@@ -140,6 +140,20 @@ func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) (
	return chunks, nil
}

// GetChunksByBatchHash retrieves chunks by batch hash
// for test
func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*Chunk, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Chunk{})
	db = db.Where("batch_hash = ?", batchHash)

	var chunks []*Chunk
	if err := db.Find(&chunks).Error; err != nil {
		return nil, fmt.Errorf("Chunk.GetChunksByBatchHash error: %w", err)
	}
	return chunks, nil
}

// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Chunk, error) {
	if chunk == nil || len(chunk.Blocks) == 0 {
@@ -242,6 +256,34 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
	return nil
}

// UpdateProvingStatusByBatchHash updates the proving_status for chunks within the specified batch_hash
func (o *Chunk) UpdateProvingStatusByBatchHash(ctx context.Context, batchHash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
	updateFields := make(map[string]interface{})
	updateFields["proving_status"] = int(status)

	switch status {
	case types.ProvingTaskAssigned:
		updateFields["prover_assigned_at"] = time.Now()
	case types.ProvingTaskUnassigned:
		updateFields["prover_assigned_at"] = nil
	case types.ProvingTaskVerified:
		updateFields["proved_at"] = time.Now()
	}

	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Chunk{})
	db = db.Where("batch_hash = ?", batchHash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("Chunk.UpdateProvingStatusByBatchHash error: %w, batch hash: %v, status: %v", err, batchHash, status.String())
	}
	return nil
}

// UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end indices.
func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error {
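
The optional dbTX variadic lets a caller run this update inside an enclosing transaction. A hedged sketch of a call site, mirroring the relayer's finalize-without-proof path earlier in this change set; the db, batchOrm, chunkOrm, ctx, and batchHash names are assumed to be in scope:

// Mark the batch and all of its chunks verified together, so the
// coordinator stops handing out proving tasks for them.
txErr := db.Transaction(func(tx *gorm.DB) error {
	if err := batchOrm.UpdateProvingStatus(ctx, batchHash, types.ProvingTaskVerified); err != nil {
		return err
	}
	return chunkOrm.UpdateProvingStatusByBatchHash(ctx, batchHash, types.ProvingTaskVerified)
})
if txErr != nil {
	log.Error("marking batch and chunks verified failed", "batchHash", batchHash, "err", txErr)
}
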
@@ -14,8 +14,7 @@ import (

	"scroll-tech/database/migrate"

	"scroll-tech/common/database"
	"scroll-tech/common/docker"
	"scroll-tech/common/testcontainers"
	"scroll-tech/common/types"
	"scroll-tech/common/types/encoding"
	"scroll-tech/common/types/encoding/codecv0"
@@ -23,7 +22,7 @@ import (
)

var (
	base *docker.App
	testApps *testcontainers.TestcontainerApps

	db *gorm.DB
	l2BlockOrm *L2Block
@@ -37,23 +36,23 @@ var (

func TestMain(m *testing.M) {
	t := &testing.T{}
	defer func() {
		if testApps != nil {
			testApps.Free()
		}
		tearDownEnv(t)
	}()
	setupEnv(t)
	defer tearDownEnv(t)
	m.Run()
}

func setupEnv(t *testing.T) {
	base = docker.NewDockerApp()
	base.RunDBImage(t)
	var err error
	db, err = database.InitDB(
		&database.Config{
			DSN: base.DBConfig.DSN,
			DriverName: base.DBConfig.DriverName,
			MaxOpenNum: base.DBConfig.MaxOpenNum,
			MaxIdleNum: base.DBConfig.MaxIdleNum,
		},
	)

	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())

	db, err = testApps.GetGormDBClient()
	assert.NoError(t, err)
	sqlDB, err := db.DB()
	assert.NoError(t, err)
@@ -81,7 +80,6 @@ func tearDownEnv(t *testing.T) {
	sqlDB, err := db.DB()
	assert.NoError(t, err)
	sqlDB.Close()
	base.Free()
}

func TestL1BlockOrm(t *testing.T) {

@@ -97,7 +97,7 @@ func setupEnv(t *testing.T) {
	assert.NoError(t, testApps.StartPostgresContainer())
	assert.NoError(t, testApps.StartL1GethContainer())
	assert.NoError(t, testApps.StartL2GethContainer())
	rollupApp = bcmd.NewRollupApp2(testApps, "../conf/config.json")
	rollupApp = bcmd.NewRollupApp(testApps, "../conf/config.json")

	l1Client, err = posL1TestEnv.L1Client()
	assert.NoError(t, err)

@@ -20,11 +20,11 @@ var (
	greeterAddress = common.HexToAddress("0x7363726f6c6c6c20000000000000000000000015")
)

func TestERC20(t *testing.T) {
	base.RunL2Geth(t)
func testERC20(t *testing.T) {
	assert.NoError(t, testApps.StartL2GethContainer())
	time.Sleep(time.Second * 3)

	l2Cli, err := base.L2Client()
	l2Cli, err := testApps.GetL2GethClient()
	assert.Nil(t, err)

	token, err := erc20.NewERC20Mock(erc20Address, l2Cli)
@@ -32,7 +32,9 @@ func TestERC20(t *testing.T) {
	privKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
	assert.NoError(t, err)

	auth, err := bind.NewKeyedTransactorWithChainID(privKey, base.L2gethImg.ChainID())
	chainID, err := l2Cli.ChainID(context.Background())
	assert.NoError(t, err)
	auth, err := bind.NewKeyedTransactorWithChainID(privKey, chainID)
	assert.NoError(t, err)

	authBls0, err := token.BalanceOf(nil, auth.From)
@@ -45,7 +47,8 @@ func TestERC20(t *testing.T) {
	value := big.NewInt(1000)
	tx, err := token.Transfer(auth, erc20Address, value)
	assert.NoError(t, err)
	bind.WaitMined(context.Background(), l2Cli, tx)
	_, err = bind.WaitMined(context.Background(), l2Cli, tx)
	assert.NoError(t, err)

	authBls1, err := token.BalanceOf(nil, auth.From)
	assert.NoError(t, err)
@@ -58,12 +61,14 @@ func TestERC20(t *testing.T) {
	assert.Equal(t, tokenBls1.Int64(), tokenBls0.Add(tokenBls0, value).Int64())
}

func TestGreeter(t *testing.T) {
	base.RunL2Geth(t)
	l2Cli, err := base.L2Client()
func testGreeter(t *testing.T) {
	assert.NoError(t, testApps.StartL2GethContainer())
	l2Cli, err := testApps.GetL2GethClient()
	assert.Nil(t, err)

	auth, err := bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, base.L2gethImg.ChainID())
	chainID, err := l2Cli.ChainID(context.Background())
	assert.NoError(t, err)
	auth, err := bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, chainID)
	assert.NoError(t, err)

	token, err := greeter.NewGreeter(greeterAddress, l2Cli)

@@ -10,52 +10,63 @@ import (
	"github.com/scroll-tech/go-ethereum/common"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/stretchr/testify/assert"
	"gorm.io/gorm"

	"scroll-tech/integration-test/orm"

	rapp "scroll-tech/prover/cmd/app"

	"scroll-tech/database/migrate"

	capp "scroll-tech/coordinator/cmd/api/app"

	"scroll-tech/common/database"
	"scroll-tech/common/docker"
	"scroll-tech/common/testcontainers"
	"scroll-tech/common/types/encoding"
	"scroll-tech/common/utils"
	"scroll-tech/common/version"

	capp "scroll-tech/coordinator/cmd/api/app"
	"scroll-tech/database/migrate"
	"scroll-tech/integration-test/orm"
	rapp "scroll-tech/prover/cmd/app"
	bcmd "scroll-tech/rollup/cmd"
)

var (
	base *docker.App
	testApps *testcontainers.TestcontainerApps
	rollupApp *bcmd.MockApp
)

func TestMain(m *testing.M) {
	base = docker.NewDockerApp()
	rollupApp = bcmd.NewRollupApp(base, "../../rollup/conf/config.json")
	defer func() {
		if testApps != nil {
			testApps.Free()
		}
		if rollupApp != nil {
			rollupApp.Free()
		}
	}()
	m.Run()
	rollupApp.Free()
	base.Free()
}

func TestCoordinatorProverInteraction(t *testing.T) {
	// Start postgres docker containers
	base.RunL2Geth(t)
	base.RunDBImage(t)
func setupEnv(t *testing.T) {
	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())
	assert.NoError(t, testApps.StartL1GethContainer())
	assert.NoError(t, testApps.StartL2GethContainer())
	rollupApp = bcmd.NewRollupApp(testApps, "../../rollup/conf/config.json")
}

	// Init data
	dbCfg := &database.Config{
		DSN: base.DBConfig.DSN,
		DriverName: base.DBConfig.DriverName,
		MaxOpenNum: base.DBConfig.MaxOpenNum,
		MaxIdleNum: base.DBConfig.MaxIdleNum,
	}
func TestFunction(t *testing.T) {
	setupEnv(t)
	t.Run("TestCoordinatorProverInteraction", testCoordinatorProverInteraction)
	t.Run("TestProverReLogin", testProverReLogin)
	t.Run("TestERC20", testERC20)
	t.Run("TestGreeter", testGreeter)
}

	db, err := database.InitDB(dbCfg)
func setupDB(t *testing.T) *gorm.DB {
	db, err := testApps.GetGormDBClient()
	assert.NoError(t, err)
	sqlDB, err := db.DB()
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))
	return db
}

func testCoordinatorProverInteraction(t *testing.T) {
	db := setupDB(t)

	sqlDB, err := db.DB()
	assert.NoError(t, err)
@@ -66,7 +77,7 @@ func TestCoordinatorProverInteraction(t *testing.T) {
	l2BlockOrm := orm.NewL2Block(db)

	// Connect to l2geth client
	l2Client, err := base.L2Client()
	l2Client, err := testApps.GetL2GethClient()
	if err != nil {
		log.Fatalf("Failed to connect to the l2geth client: %v", err)
	}
@@ -111,10 +122,9 @@ func TestCoordinatorProverInteraction(t *testing.T) {
	assert.NoError(t, err)
	t.Log(version.Version)

	base.Timestamp = time.Now().Nanosecond()
	coordinatorApp := capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json", "./genesis.json")
	chunkProverApp := rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
	batchProverApp := rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
	coordinatorApp := capp.NewCoordinatorApp(testApps, "../../coordinator/conf/config.json", "./genesis.json")
	chunkProverApp := rapp.NewProverApp(testApps, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
	batchProverApp := rapp.NewProverApp(testApps, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
	defer coordinatorApp.Free()
	defer chunkProverApp.Free()
	defer batchProverApp.Free()
@@ -139,17 +149,16 @@ func TestCoordinatorProverInteraction(t *testing.T) {
	coordinatorApp.WaitExit()
}

func TestProverReLogin(t *testing.T) {
	// Start postgres docker containers.
	base.RunL2Geth(t)
	base.RunDBImage(t)
func testProverReLogin(t *testing.T) {
	client, err := testApps.GetGormDBClient()
	assert.NoError(t, err)
	db, err := client.DB()
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(db))

	assert.NoError(t, migrate.ResetDB(base.DBClient(t)))

	base.Timestamp = time.Now().Nanosecond()
	coordinatorApp := capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json", "./genesis.json")
	chunkProverApp := rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
	batchProverApp := rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
	coordinatorApp := capp.NewCoordinatorApp(testApps, "../../coordinator/conf/config.json", "./genesis.json")
	chunkProverApp := rapp.NewProverApp(testApps, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
	batchProverApp := rapp.NewProverApp(testApps, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
	defer coordinatorApp.Free()
	defer chunkProverApp.Free()
	defer batchProverApp.Free()