Compare commits

..

48 Commits

Author  SHA1  Message  Date
colinlyguo  e929d53b0b  add a new script  2024-04-15 01:16:23 +08:00
colin  e90aa04e8c  Merge branch 'update-zkverifier-deployment-script' into codecv1-script  2024-04-13 01:38:34 +08:00
colin  32f0011d74  Merge branch 'develop' into codecv1-script  2024-04-13 01:37:59 +08:00
colinlyguo  f0fc344303  update-rollup-verifier-deployment-script  2024-04-12 23:30:42 +08:00
HAOYUatHZ  5b827c3c18  feat(db): add batch_data_hash & blob metadata (#1221)  2024-04-12 15:45:06 +08:00
    Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
    Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
georgehao  6b2eb80aa5  feat: print version info on service startup (#1268)  2024-04-12 15:09:26 +08:00
    Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Péter Garamvölgyi  71f88b04f5  fix: add blobHash to challenge and piHash (#1264)  2024-04-12 11:00:37 +08:00
    Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
Zhang Zhuo  bcd9764bcd  chore(libzkp): upgrade to v0.10.3 (#1267)  2024-04-12 10:51:39 +08:00
    Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
georgehao  b4f8377a08  fix(coordinator): fix coordinator recover public key inconsistent (#1265)  2024-04-12 07:19:31 +08:00
    Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Zhang Zhuo  b52d43caa8  chore(libzkp): upgrade to v0.10.2 (#1257)  2024-04-11 13:44:09 +08:00
    Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
    Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
Mengran Lan  201bf401cd  feat(coordinator): add new metric get_task_counter tracking prover info (#1235)  2024-04-11 12:47:03 +08:00
    Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
georgehao  898ac1d25c  feat: update batch/chunk proving status when finalize without proof (#1255)  2024-04-11 11:15:09 +08:00
    Co-authored-by: georgehao <georgehao@users.noreply.github.com>
    Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
    Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Snoppy  1336b89fb8  chore: fix typos (#1244)  2024-04-11 11:02:32 +08:00
    Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Zhang Zhuo  73045df037  feat(coordinator): support multiple batch verifier versions (#1249)  2024-04-11 10:55:58 +08:00
    Co-authored-by: georgehao <haohongfan@gmail.com>
    Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
    Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Sina Pilehchiha  b3093e9eb6  fix: Implemented fixes for 4844 support fix review. (#1256)  2024-04-10 06:05:29 -04:00
colin  3d5250e52d  feat(bridge-history): add puffer gateways (#1252)  2024-04-09 19:34:03 +08:00
    Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
HAOYUatHZ  b7324c76bc  fix(test,verifier): fix TestFFI (#1250)  2024-04-09 16:47:43 +08:00
Péter Garamvölgyi  6d6e98bd6e  test(codecv1): add zkEVM standard challenge digest test vectors (#1213)  2024-04-08 11:31:41 +08:00
    Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
    Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
    Co-authored-by: colinlyguo <colinlyguo@scroll.io>
JayLiu  9e35ce0ab4  test: add testcontainers (#1248)  2024-04-08 00:10:14 +08:00
colin  b86ebaefaf  fix(scroll): bump version (#1246)  2024-04-07 14:26:59 +08:00
JayLiu  78a4298eda  test: add testcontainers (#1229)  2024-04-07 14:26:06 +08:00
    Co-authored-by: liuyuecai <liuyuecai@360.cn>
Zhang Zhuo  49d8387714  chore: upgrade libzkp to v0.10.0rc3, fix sha256 rows (#1245)  2024-04-07 14:20:51 +08:00
colinlyguo  be5c58a427  update scripts  2024-04-06 22:04:58 +08:00
colinlyguo  97c85209c5  update tx payload length  2024-04-06 22:00:22 +08:00
colinlyguo  48534f8698  fix  2024-04-06 17:46:55 +08:00
colinlyguo  378ec79d14  fix  2024-04-06 03:26:03 +08:00
colinlyguo  1054023dd5  fix script  2024-04-06 03:21:27 +08:00
colinlyguo  65481212d5  update script  2024-04-06 03:13:22 +08:00
colinlyguo  ba289fc651  update contract  2024-04-05 18:36:36 +08:00
colinlyguo  64ed273a1d  fix  2024-04-05 03:03:24 +08:00
colinlyguo  c1059a3f51  fix  2024-04-05 00:20:01 +08:00
colin  70aa557968  Merge branch 'develop' into codecv1-script  2024-04-04 19:03:44 +08:00
colinlyguo  94a3c0a571  update sending script  2024-04-04 18:52:19 +08:00
HAOYUatHZ  4c6016f852  add relay-skipped-tx.ts  2024-04-04 17:29:15 +08:00
HAOYUatHZ  e53e150341  add ecc.sol foundry deployment scripts  2024-04-04 16:30:36 +08:00
HAOYUatHZ  5b6b170db1  add hash.sol foundry deployment scripts  2024-04-04 16:24:40 +08:00
HAOYUatHZ  44c77bcc87  add sha256 & ecc contracts  2024-04-04 16:19:39 +08:00
colinlyguo  364173a8d6  add finalizeBatch calldata script  2024-04-03 16:30:29 +08:00
georgehao  af2913903b  feat(coordinator): optimize get_task sql (#1228)  2024-04-03 12:01:48 +08:00
    Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Xi Lin  f8a7d70872  fix(contracts): fix number of non-skipped l1 messages (#1232)  2024-04-03 11:10:17 +08:00
    Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Zhang Zhuo  790fc44b40  chore(libzkp): upgrade to v0.10.0l, fix keccak overflow (#1231)  2024-04-03 10:54:46 +08:00
    Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
    Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
colinlyguo  2afba3e394  fix scripts  2024-04-03 10:24:02 +08:00
Zhang Zhuo  620c71b16d  fix(proving): use ChunkInfo embedded inside ChunkProof for tx_bytes (#1230)  2024-04-03 09:24:06 +08:00
    Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
colinlyguo  53b24f75f8  fix a typo  2024-04-03 02:51:01 +08:00
colinlyguo  de60ea8f55  add dump calldata scripts  2024-04-03 02:35:41 +08:00
colinlyguo  9ade976ce5  add large transaction payload script  2024-04-02 19:50:59 +08:00
colinlyguo  4bb9cf89aa  dump full blob  2024-04-01 17:11:39 +08:00
colinlyguo  efe0d7d2fe  feat: codecv1 script  2024-04-01 15:41:56 +08:00
94 changed files with 2309 additions and 1297 deletions

View File

@@ -23,6 +23,7 @@ type FetcherConfig struct {
 	DAIGatewayAddr     string `json:"DAIGatewayAddr"`
 	USDCGatewayAddr    string `json:"USDCGatewayAddr"`
 	LIDOGatewayAddr    string `json:"LIDOGatewayAddr"`
+	PufferGatewayAddr  string `json:"PufferGatewayAddr"`
 	ERC721GatewayAddr  string `json:"ERC721GatewayAddr"`
 	ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
 	ScrollChainAddr    string `json:"ScrollChainAddr"`

View File

@@ -93,6 +93,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
 	}
+	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
+	}
+
 	log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)

 	f := &L1FetcherLogic{

View File

@@ -85,7 +85,12 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 	if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
 		addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
-		gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
 	}
+	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
+	}
+
 	log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)

View File

@@ -1,62 +1,27 @@
-package database
+package database_test

 import (
-	"context"
-	"errors"
-	"io"
-	"os"
 	"testing"
-	"time"

-	"github.com/mattn/go-colorable"
-	"github.com/mattn/go-isatty"
-	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/stretchr/testify/assert"

-	"scroll-tech/common/docker"
+	"scroll-tech/common/database"
+	"scroll-tech/common/testcontainers"
 	"scroll-tech/common/version"
 )

-func TestGormLogger(t *testing.T) {
-	output := io.Writer(os.Stderr)
-	usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
-	if usecolor {
-		output = colorable.NewColorableStderr()
-	}
-	ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
-	glogger := log.NewGlogHandler(ostream)
-	// Set log level
-	glogger.Verbosity(log.LvlTrace)
-	log.Root().SetHandler(glogger)
-
-	var gl gormLogger
-	gl.gethLogger = log.Root()
-
-	gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
-	gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
-	gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
-	gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
-}
-
 func TestDB(t *testing.T) {
 	version.Version = "v4.1.98-aaa-bbb-ccc"

-	base := docker.NewDockerApp()
-	base.RunDBImage(t)
-
-	dbCfg := &Config{
-		DSN:        base.DBConfig.DSN,
-		DriverName: base.DBConfig.DriverName,
-		MaxOpenNum: base.DBConfig.MaxOpenNum,
-		MaxIdleNum: base.DBConfig.MaxIdleNum,
-	}
-
-	var err error
-	db, err := InitDB(dbCfg)
+	testApps := testcontainers.NewTestcontainerApps()
+	assert.NoError(t, testApps.StartPostgresContainer())
+
+	db, err := testApps.GetGormDBClient()
 	assert.NoError(t, err)
-	sqlDB, err := Ping(db)
+	sqlDB, err := database.Ping(db)
 	assert.NoError(t, err)
 	assert.NotNil(t, sqlDB)
-	assert.NoError(t, CloseDB(db))
+	assert.NoError(t, database.CloseDB(db))
 }

View File

@@ -0,0 +1,35 @@
package database
import (
"context"
"errors"
"io"
"os"
"testing"
"time"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/scroll-tech/go-ethereum/log"
)
func TestGormLogger(t *testing.T) {
output := io.Writer(os.Stderr)
usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
if usecolor {
output = colorable.NewColorableStderr()
}
ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
glogger := log.NewGlogHandler(ostream)
// Set log level
glogger.Verbosity(log.LvlTrace)
log.Root().SetHandler(glogger)
var gl gormLogger
gl.gethLogger = log.Root()
gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
}

View File

@@ -1,196 +0,0 @@
package docker
import (
"crypto/rand"
"database/sql"
"encoding/json"
"fmt"
"math/big"
"os"
"testing"
"time"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/common/utils"
)
var (
l1StartPort = 10000
l2StartPort = 20000
dbStartPort = 30000
)
// AppAPI app interface.
type AppAPI interface {
IsRunning() bool
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
RunApp(waitResult func() bool)
WaitExit()
ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
}
// App is collection struct of runtime docker images
type App struct {
L1gethImg GethImgInstance
L2gethImg GethImgInstance
DBImg ImgInstance
dbClient *sql.DB
DBConfig *database.DBConfig
DBConfigFile string
// common time stamp.
Timestamp int
}
// NewDockerApp returns new instance of dockerApp struct
func NewDockerApp() *App {
timestamp := time.Now().Nanosecond()
app := &App{
Timestamp: timestamp,
L1gethImg: newTestL1Docker(),
L2gethImg: newTestL2Docker(),
DBImg: newTestDBDocker("postgres"),
DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
}
if err := app.mockDBConfig(); err != nil {
panic(err)
}
return app
}
// RunImages runs all images togather
func (b *App) RunImages(t *testing.T) {
b.RunDBImage(t)
b.RunL1Geth(t)
b.RunL2Geth(t)
}
// RunDBImage starts postgres docker container.
func (b *App) RunDBImage(t *testing.T) {
if b.DBImg.IsRunning() {
return
}
assert.NoError(t, b.DBImg.Start())
// try 5 times until the db is ready.
ok := utils.TryTimes(10, func() bool {
db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
return err == nil && db != nil && db.Ping() == nil
})
assert.True(t, ok)
}
// Free clear all running images, double check and recycle docker container.
func (b *App) Free() {
if b.L1gethImg.IsRunning() {
_ = b.L1gethImg.Stop()
}
if b.L2gethImg.IsRunning() {
_ = b.L2gethImg.Stop()
}
if b.DBImg.IsRunning() {
_ = b.DBImg.Stop()
_ = os.Remove(b.DBConfigFile)
if !utils.IsNil(b.dbClient) {
_ = b.dbClient.Close()
b.dbClient = nil
}
}
}
// RunL1Geth starts l1geth docker container.
func (b *App) RunL1Geth(t *testing.T) {
if b.L1gethImg.IsRunning() {
return
}
assert.NoError(t, b.L1gethImg.Start())
}
// L1Client returns a ethclient by dialing running l1geth
func (b *App) L1Client() (*ethclient.Client, error) {
if utils.IsNil(b.L1gethImg) {
return nil, fmt.Errorf("l1 geth is not running")
}
client, err := ethclient.Dial(b.L1gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
// RunL2Geth starts l2geth docker container.
func (b *App) RunL2Geth(t *testing.T) {
if b.L2gethImg.IsRunning() {
return
}
assert.NoError(t, b.L2gethImg.Start())
}
// L2Client returns a ethclient by dialing running l2geth
func (b *App) L2Client() (*ethclient.Client, error) {
if utils.IsNil(b.L2gethImg) {
return nil, fmt.Errorf("l2 geth is not running")
}
client, err := ethclient.Dial(b.L2gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
// DBClient create and return *sql.DB instance.
func (b *App) DBClient(t *testing.T) *sql.DB {
if !utils.IsNil(b.dbClient) {
return b.dbClient
}
var (
cfg = b.DBConfig
err error
)
b.dbClient, err = sql.Open(cfg.DriverName, cfg.DSN)
assert.NoError(t, err)
b.dbClient.SetMaxOpenConns(cfg.MaxOpenNum)
b.dbClient.SetMaxIdleConns(cfg.MaxIdleNum)
assert.NoError(t, b.dbClient.Ping())
return b.dbClient
}
func (b *App) mockDBConfig() error {
b.DBConfig = &database.DBConfig{
DSN: "",
DriverName: "postgres",
MaxOpenNum: 200,
MaxIdleNum: 20,
}
if b.DBImg != nil {
b.DBConfig.DSN = b.DBImg.Endpoint()
}
data, err := json.Marshal(b.DBConfig)
if err != nil {
return err
}
return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
}
func newTestL1Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
}
func newTestL2Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
}
func newTestDBDocker(driverName string) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
}

View File

@@ -1,131 +0,0 @@
package docker
import (
"context"
"fmt"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgDB the postgres image manager.
type ImgDB struct {
image string
name string
id string
dbName string
port int
password string
running bool
cmd *cmd.Cmd
}
// NewImgDB return postgres db img instance.
func NewImgDB(image, password, dbName string, port int) ImgInstance {
img := &ImgDB{
image: image,
name: fmt.Sprintf("%s-%s_%d", image, dbName, port),
password: password,
dbName: dbName,
port: port,
}
img.cmd = cmd.NewCmd("docker", img.prepare()...)
return img
}
// Start postgres db container.
func (i *ImgDB) Start() error {
id := GetContainerID(i.name)
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.running = i.isOk()
if !i.running {
_ = i.Stop()
return fmt.Errorf("failed to start image: %s", i.image)
}
return nil
}
// Stop the container.
func (i *ImgDB) Stop() error {
if !i.running {
return nil
}
i.running = false
ctx := context.Background()
// stop the running container.
if i.id == "" {
i.id = GetContainerID(i.name)
}
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
return err
}
// remove the stopped container.
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
}
// Endpoint return the dsn.
func (i *ImgDB) Endpoint() string {
return fmt.Sprintf("postgres://postgres:%s@localhost:%d/%s?sslmode=disable", i.password, i.port, i.dbName)
}
// IsRunning returns docker container's running status.
func (i *ImgDB) IsRunning() bool {
return i.running
}
func (i *ImgDB) prepare() []string {
cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
envs := []string{
"-e", "POSTGRES_PASSWORD=" + i.password,
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
}
cmd = append(cmd, envs...)
return append(cmd, i.image)
}
func (i *ImgDB) isOk() bool {
keyword := "database system is ready to accept connections"
okCh := make(chan struct{}, 1)
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer i.cmd.UnRegistFunc(keyword)
// Start cmd in parallel.
i.cmd.RunCmd(true)
select {
case <-okCh:
utils.TryTimes(20, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
case err := <-i.cmd.ErrChan:
if err != nil {
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
}
case <-time.After(time.Second * 20):
return false
}
return i.id != ""
}

View File

@@ -1,174 +0,0 @@
package docker
import (
"context"
"fmt"
"math/big"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/scroll-tech/go-ethereum/ethclient"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgGeth the geth image manager include l1geth and l2geth.
type ImgGeth struct {
image string
name string
id string
volume string
ipcPath string
httpPort int
wsPort int
chainID *big.Int
running bool
cmd *cmd.Cmd
}
// NewImgGeth return geth img instance.
func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
img := &ImgGeth{
image: image,
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
volume: volume,
ipcPath: ipc,
httpPort: hPort,
wsPort: wPort,
}
img.cmd = cmd.NewCmd("docker", img.params()...)
return img
}
// Start run image and check if it is running healthily.
func (i *ImgGeth) Start() error {
id := GetContainerID(i.name)
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.running = i.isOk()
if !i.running {
_ = i.Stop()
return fmt.Errorf("failed to start image: %s", i.image)
}
// try 10 times to get chainID until is ok.
utils.TryTimes(10, func() bool {
client, err := ethclient.Dial(i.Endpoint())
if err == nil && client != nil {
i.chainID, err = client.ChainID(context.Background())
return err == nil && i.chainID != nil
}
return false
})
return nil
}
// IsRunning returns docker container's running status.
func (i *ImgGeth) IsRunning() bool {
return i.running
}
// Endpoint return the connection endpoint.
func (i *ImgGeth) Endpoint() string {
switch true {
case i.httpPort != 0:
return fmt.Sprintf("http://127.0.0.1:%d", i.httpPort)
case i.wsPort != 0:
return fmt.Sprintf("ws://127.0.0.1:%d", i.wsPort)
default:
return i.ipcPath
}
}
// ChainID return chainID.
func (i *ImgGeth) ChainID() *big.Int {
return i.chainID
}
func (i *ImgGeth) isOk() bool {
keyword := "WebSocket enabled"
okCh := make(chan struct{}, 1)
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer i.cmd.UnRegistFunc(keyword)
// Start cmd in parallel.
i.cmd.RunCmd(true)
select {
case <-okCh:
utils.TryTimes(20, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
case err := <-i.cmd.ErrChan:
if err != nil {
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
}
case <-time.After(time.Second * 10):
return false
}
return i.id != ""
}
// Stop the docker container.
func (i *ImgGeth) Stop() error {
if !i.running {
return nil
}
i.running = false
ctx := context.Background()
// check if container is running, stop the running container.
id := GetContainerID(i.name)
if id != "" {
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, id, timeout); err != nil {
return err
}
i.id = id
}
// remove the stopped container.
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
}
func (i *ImgGeth) params() []string {
cmds := []string{"run", "--rm", "--name", i.name}
var ports []string
if i.httpPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)
}
if i.wsPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.wsPort) + ":8546"}...)
}
var envs []string
if i.ipcPath != "" {
envs = append(envs, []string{"-e", fmt.Sprintf("IPC_PATH=%s", i.ipcPath)}...)
}
if i.volume != "" {
cmds = append(cmds, []string{"-v", fmt.Sprintf("%s:%s", i.volume, i.volume)}...)
}
cmds = append(cmds, ports...)
cmds = append(cmds, envs...)
return append(cmds, i.image)
}

View File

@@ -1,54 +0,0 @@
package docker_test
import (
"context"
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
)
var (
base *docker.App
)
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
m.Run()
base.Free()
}
func TestDB(t *testing.T) {
base.RunDBImage(t)
db, err := sqlx.Open("postgres", base.DBImg.Endpoint())
assert.NoError(t, err)
assert.NoError(t, db.Ping())
}
func TestL1Geth(t *testing.T) {
base.RunL1Geth(t)
client, err := base.L1Client()
assert.NoError(t, err)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}
func TestL2Geth(t *testing.T) {
base.RunL2Geth(t)
client, err := base.L2Client()
assert.NoError(t, err)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}

View File

@@ -9,8 +9,6 @@ require (
 	github.com/docker/docker v25.0.3+incompatible
 	github.com/gin-contrib/pprof v1.4.0
 	github.com/gin-gonic/gin v1.9.1
-	github.com/jmoiron/sqlx v1.3.5
-	github.com/lib/pq v1.10.9
 	github.com/mattn/go-colorable v0.1.13
 	github.com/mattn/go-isatty v0.0.20
 	github.com/modern-go/reflect2 v1.0.2
@@ -18,9 +16,9 @@
 	github.com/prometheus/client_golang v1.16.0
 	github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
 	github.com/stretchr/testify v1.9.0
-	github.com/testcontainers/testcontainers-go v0.29.1
-	github.com/testcontainers/testcontainers-go/modules/compose v0.29.1
-	github.com/testcontainers/testcontainers-go/modules/postgres v0.29.1
+	github.com/testcontainers/testcontainers-go v0.28.0
+	github.com/testcontainers/testcontainers-go/modules/compose v0.28.0
+	github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0
 	github.com/urfave/cli/v2 v2.25.7
 	gorm.io/driver/postgres v1.5.0
 	gorm.io/gorm v1.25.5
@@ -145,7 +143,6 @@
 	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
-	github.com/mattn/go-sqlite3 v1.14.16 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect

View File

@@ -268,7 +268,6 @@ github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXS
 github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24=
 github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
 github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
 github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -400,8 +399,6 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
 github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
-github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
 github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
 github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -443,7 +440,6 @@ github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ic
 github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
 github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
@@ -469,9 +465,6 @@ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
 github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
 github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
-github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
-github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -678,12 +671,12 @@ github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZ
 github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
-github.com/testcontainers/testcontainers-go v0.29.1 h1:z8kxdFlovA2y97RWx98v/TQ+tR+SXZm6p35M+xB92zk=
-github.com/testcontainers/testcontainers-go v0.29.1/go.mod h1:SnKnKQav8UcgtKqjp/AD8bE1MqZm+3TDb/B8crE3XnI=
-github.com/testcontainers/testcontainers-go/modules/compose v0.29.1 h1:47ipPM+s+ltCDOP3Sa1j95AkNb+z+WGiHLDbLU8ixuc=
-github.com/testcontainers/testcontainers-go/modules/compose v0.29.1/go.mod h1:Sqh+Ef2ESdbJQjTJl57UOkEHkOc7gXvQLg1b5xh6f1Y=
-github.com/testcontainers/testcontainers-go/modules/postgres v0.29.1 h1:hTn3MzhR9w4btwfzr/NborGCaeNZG0MPBpufeDj10KA=
-github.com/testcontainers/testcontainers-go/modules/postgres v0.29.1/go.mod h1:YsWyy+pHDgvGdi0axGOx6CGXWsE6eqSaApyd1FYYSSc=
+github.com/testcontainers/testcontainers-go v0.28.0 h1:1HLm9qm+J5VikzFDYhOd+Zw12NtOl+8drH2E8nTY1r8=
+github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
+github.com/testcontainers/testcontainers-go/modules/compose v0.28.0 h1:QOCeTYZIYixg796Ik60MOaeMgpAKPbQd5pJOdTrftyg=
+github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
+github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0 h1:ff0s4JdYIdNAVSi/SrpN2Pdt1f+IjIw3AKjbHau8Un4=
+github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
 github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
 github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
 github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=

View File

@@ -31,7 +31,7 @@ dependencies = [
 [[package]]
 name = "aggregator"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0k#15cfd66323d45747b093a188569f6a93e6e981a0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "ark-std 0.3.0",
  "c-kzg",
@@ -521,7 +521,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
 [[package]]
 name = "bus-mapping"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0k#15cfd66323d45747b093a188569f6a93e6e981a0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "ethers-core",
@@ -535,7 +535,7 @@ dependencies = [
  "mock",
  "mpt-zktrie",
  "num",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
  "rand",
  "revm-precompile",
  "serde",
@@ -1139,7 +1139,7 @@ dependencies = [
 [[package]]
 name = "eth-types"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0k#15cfd66323d45747b093a188569f6a93e6e981a0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "base64 0.13.1",
  "ethers-core",
@@ -1150,7 +1150,7 @@ dependencies = [
  "itertools 0.11.0",
  "num",
  "num-bigint",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
  "regex",
  "serde",
  "serde_json",
@@ -1293,7 +1293,7 @@ dependencies = [
 [[package]]
 name = "external-tracer"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0k#15cfd66323d45747b093a188569f6a93e6e981a0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "geth-utils",
@@ -1485,7 +1485,7 @@ dependencies = [
 [[package]]
 name = "gadgets"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0k#15cfd66323d45747b093a188569f6a93e6e981a0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "halo2_proofs",
@@ -1507,7 +1507,7 @@ dependencies = [
 [[package]]
 name = "geth-utils"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0k#15cfd66323d45747b093a188569f6a93e6e981a0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "env_logger 0.10.0",
  "gobuild",
@@ -1684,7 +1684,7 @@ dependencies = [
  "log",
  "num-bigint",
  "num-traits",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=scroll-dev-1201)",
  "rand",
  "rand_chacha",
  "serde",
@@ -2142,7 +2142,7 @@ dependencies = [
 [[package]]
 name = "keccak256"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0k#15cfd66323d45747b093a188569f6a93e6e981a0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "env_logger 0.10.0",
  "eth-types",
@@ -2292,7 +2292,7 @@ dependencies = [
 [[package]]
 name = "mock"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0k#15cfd66323d45747b093a188569f6a93e6e981a0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "ethers-core",
@@ -2307,7 +2307,7 @@ dependencies = [
 [[package]]
 name = "mpt-zktrie"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0k#15cfd66323d45747b093a188569f6a93e6e981a0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "halo2-mpt-circuits",
@@ -2315,7 +2315,7 @@ dependencies = [
  "hex",
  "log",
  "num-bigint",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
  "zktrie",
 ]
@@ -2671,6 +2671,21 @@ dependencies = [
  "subtle",
 ]

+[[package]]
+name = "poseidon-circuit"
+version = "0.1.0"
+source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#babf5f6a69bec40b2e6523df317c073dcd0b1f97"
+dependencies = [
+ "bitvec",
+ "ff 0.13.0",
+ "halo2_proofs",
+ "lazy_static",
+ "log",
+ "rand",
+ "rand_xorshift",
+ "thiserror",
+]
+
 [[package]]
 name = "poseidon-circuit"
 version = "0.1.0"
@@ -2754,7 +2769,7 @@ dependencies = [
 [[package]]
 name = "prover"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0k#15cfd66323d45747b093a188569f6a93e6e981a0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "aggregator",
  "anyhow",
@@ -4441,7 +4456,7 @@ dependencies = [
 [[package]]
 name = "zkevm-circuits"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0k#15cfd66323d45747b093a188569f6a93e6e981a0"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "array-init",
  "bus-mapping",
@@ -4465,7 +4480,7 @@ dependencies = [
  "mpt-zktrie",
  "num",
  "num-bigint",
- "poseidon-circuit",
+ "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)",
  "rand",
  "rand_chacha",
  "rand_xorshift",
@@ -4494,6 +4509,7 @@ dependencies = [
  "serde",
  "serde_derive",
  "serde_json",
+ "snark-verifier-sdk",
 ]

 [[package]]

View File

@@ -24,7 +24,8 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i
 [dependencies]
 halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
-prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.0k", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
+snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
+prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
 base64 = "0.13.0"
 env_logger = "0.9.0"

View File

@@ -12,6 +12,7 @@ use prover::{
     utils::{chunk_trace_to_witness_block, init_env_and_log},
     BatchProof, BlockTrace, ChunkHash, ChunkProof,
 };
+use snark_verifier_sdk::verify_evm_calldata;
 use std::{cell::OnceCell, env, ptr::null};

 static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -148,11 +149,33 @@
 /// # Safety
 #[no_mangle]
-pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char {
+pub unsafe extern "C" fn verify_batch_proof(
+    proof: *const c_char,
+    fork_name: *const c_char,
+) -> c_char {
     let proof = c_char_to_vec(proof);
     let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
+    let fork_name_str = c_char_to_str(fork_name);
-    let verified = panic_catch(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
+    let fork_id = match fork_name_str {
+        "" => 0,
+        "shanghai" => 0,
+        "bernoulli" => 1,
+        _ => {
+            log::warn!("unexpected fork_name {fork_name_str}, treated as bernoulli");
+            1
+        }
+    };
+    let verified = panic_catch(|| {
+        if fork_id == 0 {
+            // before upgrade#2(EIP4844)
+            verify_evm_calldata(
+                include_bytes!("evm_verifier_fork_1.bin").to_vec(),
+                proof.calldata(),
+            )
+        } else {
+            VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
+        }
+    });
     verified.unwrap_or(false) as c_char
 }

Binary file not shown.

View File

@@ -3,7 +3,7 @@ void init_batch_verifier(char* params_dir, char* assets_dir);
 char* get_batch_vk();
 char* check_chunk_proofs(char* chunk_proofs);
 char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
-char verify_batch_proof(char* proof);
+char verify_batch_proof(char* proof, char* fork_name);
 void init_chunk_prover(char* params_dir, char* assets_dir);
 void init_chunk_verifier(char* params_dir, char* assets_dir);
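On the Go side, callers of this C API now have to supply the hard-fork name alongside the proof. The cgo wrapper below is a hypothetical illustration of the updated signature; the link flag, header path, and the wrapper function itself are assumptions, since the real binding lives in the coordinator's verifier package and is not part of this diff:

// Package verifier: a hypothetical cgo wrapper around the updated libzkp C API.
package verifier

/*
#cgo LDFLAGS: -lzkp
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"

import "unsafe"

// VerifyBatchProof shows how the new fork_name argument is threaded through the
// C ABI declared in libzkp.h above ("" or "shanghai" selects the pre-4844
// verifier path, "bernoulli" the current one).
func VerifyBatchProof(proofJSON []byte, forkName string) bool {
	cProof := C.CString(string(proofJSON))
	defer C.free(unsafe.Pointer(cProof))

	cFork := C.CString(forkName)
	defer C.free(unsafe.Pointer(cFork))

	// libzkp returns a C char: non-zero means the proof verified.
	return C.verify_batch_proof(cProof, cFork) != 0
}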

View File

@@ -10,6 +10,9 @@ import (
 	"github.com/testcontainers/testcontainers-go"
 	"github.com/testcontainers/testcontainers-go/modules/postgres"
 	"github.com/testcontainers/testcontainers-go/wait"
+	"gorm.io/gorm"
+
+	"scroll-tech/common/database"
 )

 // TestcontainerApps testcontainers struct
@@ -129,6 +132,21 @@ func (t *TestcontainerApps) GetL2GethEndPoint() {
 	return endpoint, nil
 }

+// GetGormDBClient returns a gorm.DB by connecting to the running postgres container
+func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
+	endpoint, err := t.GetDBEndPoint()
+	if err != nil {
+		return nil, err
+	}
+	dbCfg := &database.Config{
+		DSN:        endpoint,
+		DriverName: "postgres",
+		MaxOpenNum: 200,
+		MaxIdleNum: 20,
+	}
+	return database.InitDB(dbCfg)
+}
+
 // GetL1GethClient returns a ethclient by dialing running L1Geth
 func (t *TestcontainerApps) GetL1GethClient() (*ethclient.Client, error) {
 	endpoint, err := t.GetL1GethEndPoint()
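A minimal usage sketch of the new GetGormDBClient helper, modeled on the database_test.go change earlier in this diff (illustrative test, not an excerpt from the repository; the package name is a placeholder):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"scroll-tech/common/database"
	"scroll-tech/common/testcontainers"
)

// TestGormClient starts the postgres testcontainer and obtains a *gorm.DB
// through GetGormDBClient, mirroring the updated common/database test above.
func TestGormClient(t *testing.T) {
	testApps := testcontainers.NewTestcontainerApps()
	defer testApps.Free() // stop all started containers

	assert.NoError(t, testApps.StartPostgresContainer())

	db, err := testApps.GetGormDBClient()
	assert.NoError(t, err)

	sqlDB, err := database.Ping(db)
	assert.NoError(t, err)
	assert.NotNil(t, sqlDB)
	assert.NoError(t, database.CloseDB(db))
}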

View File

@@ -5,14 +5,16 @@ import (
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/stretchr/testify/assert"
+	"gorm.io/gorm"
 )

 // TestNewTestcontainerApps tests NewTestcontainerApps
 func TestNewTestcontainerApps(t *testing.T) {
 	var (
 		err      error
 		endpoint string
-		client   *ethclient.Client
+		gormDBclient *gorm.DB
+		ethclient    *ethclient.Client
 	)

 	// test start testcontainers
@@ -21,22 +23,25 @@
 	endpoint, err = testApps.GetDBEndPoint()
 	assert.NoError(t, err)
 	assert.NotEmpty(t, endpoint)

+	gormDBclient, err = testApps.GetGormDBClient()
+	assert.NoError(t, err)
+	assert.NotNil(t, gormDBclient)
+
 	assert.NoError(t, testApps.StartL1GethContainer())
 	endpoint, err = testApps.GetL1GethEndPoint()
 	assert.NoError(t, err)
 	assert.NotEmpty(t, endpoint)

-	client, err = testApps.GetL1GethClient()
+	ethclient, err = testApps.GetL1GethClient()
 	assert.NoError(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, ethclient)

 	assert.NoError(t, testApps.StartL2GethContainer())
 	endpoint, err = testApps.GetL2GethEndPoint()
 	assert.NoError(t, err)
 	assert.NotEmpty(t, endpoint)

-	client, err = testApps.GetL2GethClient()
+	ethclient, err = testApps.GetL2GethClient()
 	assert.NoError(t, err)
-	assert.NotNil(t, client)
+	assert.NotNil(t, ethclient)

 	// test free testcontainers
 	testApps.Free()

View File

@@ -0,0 +1,133 @@
package main
import (
"context"
"fmt"
"log"
"math/big"
"time"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rlp"
)
const targetTxSize = 126914
func main() {
privateKeyHex := "0000000000000000000000000000000000000000000000000000000000000042"
privateKey, err := crypto.HexToECDSA(privateKeyHex)
if err != nil {
log.Fatalf("Invalid private key: %v", err)
}
client, err := ethclient.Dial("http://localhost:9999")
if err != nil {
log.Fatalf("Failed to connect to the Ethereum client: %v", err)
}
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(222222))
if err != nil {
log.Fatalf("Failed to create transactor with chain ID 222222: %v", err)
}
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
if err != nil {
log.Fatalf("Failed to retrieve account nonce: %v", err)
}
totalTxNum := []uint64{2, 3, 4, 5, 6}
for _, num := range totalTxNum {
prepareAndSendTransactions(client, auth, nonce, num)
nonce += num
}
}
func prepareAndSendTransactions(client *ethclient.Client, auth *bind.TransactOpts, initialNonce uint64, totalTxNum uint64) error {
gasLimit := uint64(5000000)
gasPrice := big.NewInt(1000000000)
var signedTxs []*types.Transaction
payloadSum := 0
dataPayload := make([]byte, targetTxSize/totalTxNum)
for i := range dataPayload {
dataPayload[i] = 0xff
}
for i := uint64(0); i < totalTxNum-1; i++ {
txData := &types.LegacyTx{
Nonce: initialNonce + i,
GasPrice: gasPrice,
Gas: gasLimit,
To: &auth.From,
Data: dataPayload,
}
signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
if err != nil {
log.Fatalf("Failed to sign tx: %v", err)
}
rlpTxData, err := rlp.EncodeToBytes(signedTx)
if err != nil {
log.Fatalf("Failed to RLP encode the tx: %v", err)
}
payloadSum += len(rlpTxData)
signedTxs = append(signedTxs, signedTx)
}
fmt.Println("payload sum", payloadSum)
lowerBound := 0
upperBound := targetTxSize
for lowerBound <= upperBound {
mid := (lowerBound + upperBound) / 2
data := make([]byte, mid)
for i := range data {
data[i] = 0xff
}
txData := &types.LegacyTx{
Nonce: initialNonce + totalTxNum - 1,
GasPrice: gasPrice,
Gas: gasLimit,
To: &auth.From,
Data: data,
}
signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
if err != nil {
log.Fatalf("Failed to sign tx: %v", err)
}
rlpTxData, err := rlp.EncodeToBytes(signedTx)
if err != nil {
log.Fatalf("Failed to RLP encode the tx: %v", err)
}
txSize := len(rlpTxData)
if payloadSum+txSize < targetTxSize {
lowerBound = mid + 1
} else if payloadSum+txSize > targetTxSize {
upperBound = mid - 1
} else {
fmt.Println("payloadSum+txSize", payloadSum+txSize)
signedTxs = append(signedTxs, signedTx)
break
}
}
for _, signedTx := range signedTxs {
if err := client.SendTransaction(context.Background(), signedTx); err != nil {
return fmt.Errorf("failed to send transaction: %v", err)
}
fmt.Printf("Transaction with nonce %d sent\n", signedTx.Nonce())
time.Sleep(10 * time.Second)
}
return nil
}

View File

@@ -0,0 +1,131 @@
package main
import (
"context"
"fmt"
"log"
"math/big"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rlp"
)
const targetTxSize = 120568
func main() {
privateKeyHex := "0000000000000000000000000000000000000000000000000000000000000042"
privateKey, err := crypto.HexToECDSA(privateKeyHex)
if err != nil {
log.Fatalf("Invalid private key: %v", err)
}
client, err := ethclient.Dial("http://localhost:9999")
if err != nil {
log.Fatalf("Failed to connect to the Ethereum client: %v", err)
}
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(222222))
if err != nil {
log.Fatalf("Failed to create transactor with chain ID 222222: %v", err)
}
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
if err != nil {
log.Fatalf("Failed to retrieve account nonce: %v", err)
}
prepareAndSendTransactions(client, auth, nonce, 1)
prepareAndSendTransactions(client, auth, nonce+1, 2)
prepareAndSendTransactions(client, auth, nonce+1+2, 3)
prepareAndSendTransactions(client, auth, nonce+1+2+3, 4)
prepareAndSendTransactions(client, auth, nonce+1+2+3+4, 5)
prepareAndSendTransactions(client, auth, nonce+1+2+3+4+5, 6)
}
func prepareAndSendTransactions(client *ethclient.Client, auth *bind.TransactOpts, initialNonce uint64, totalTxNum uint64) error {
gasLimit := uint64(5000000)
gasPrice := big.NewInt(1000000000)
var signedTxs []*types.Transaction
payloadSum := 0
dataPayload := make([]byte, targetTxSize/totalTxNum)
for i := range dataPayload {
dataPayload[i] = 0xff
}
for i := uint64(0); i < totalTxNum-1; i++ {
txData := &types.LegacyTx{
Nonce: initialNonce + i,
GasPrice: gasPrice,
Gas: gasLimit,
To: &auth.From,
Data: dataPayload,
}
signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
if err != nil {
log.Fatalf("Failed to sign tx: %v", err)
}
rlpTxData, err := rlp.EncodeToBytes(signedTx)
if err != nil {
log.Fatalf("Failed to RLP encode the tx: %v", err)
}
payloadSum += len(rlpTxData)
signedTxs = append(signedTxs, signedTx)
}
fmt.Println("payload sum", payloadSum)
lowerBound := 0
upperBound := targetTxSize
for lowerBound <= upperBound {
mid := (lowerBound + upperBound) / 2
data := make([]byte, mid)
for i := range data {
data[i] = 0xff
}
txData := &types.LegacyTx{
Nonce: initialNonce + totalTxNum - 1,
GasPrice: gasPrice,
Gas: gasLimit,
To: &auth.From,
Data: data,
}
signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
if err != nil {
log.Fatalf("Failed to sign tx: %v", err)
}
rlpTxData, err := rlp.EncodeToBytes(signedTx)
if err != nil {
log.Fatalf("Failed to RLP encode the tx: %v", err)
}
txSize := len(rlpTxData)
if payloadSum+txSize < targetTxSize {
lowerBound = mid + 1
} else if payloadSum+txSize > targetTxSize {
upperBound = mid - 1
} else {
fmt.Println("payloadSum+txSize", payloadSum+txSize)
signedTxs = append(signedTxs, signedTx)
break
}
}
for i := len(signedTxs) - 1; i >= 0; i-- {
if err := client.SendTransaction(context.Background(), signedTxs[i]); err != nil {
return fmt.Errorf("failed to send transaction: %v", err)
}
fmt.Printf("Transaction with nonce %d sent\n", signedTxs[i].Nonce())
}
return nil
}

View File

@@ -227,18 +227,11 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
} }
// blob payload // blob payload
blob, z, err := constructBlobPayload(batch.Chunks) blob, blobVersionedHash, z, err := constructBlobPayload(batch.Chunks)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// blob versioned hash
c, err := kzg4844.BlobToCommitment(*blob)
if err != nil {
return nil, fmt.Errorf("failed to create blob commitment")
}
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
daBatch := DABatch{ daBatch := DABatch{
Version: CodecV1Version, Version: CodecV1Version,
BatchIndex: batch.Index, BatchIndex: batch.Index,
@@ -281,7 +274,7 @@ func computeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore u
} }
// constructBlobPayload constructs the 4844 blob payload. // constructBlobPayload constructs the 4844 blob payload.
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) { func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk) // metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
metadataLength := 2 + MaxNumChunks*4 metadataLength := 2 + MaxNumChunks*4
@@ -289,8 +282,8 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
blobBytes := make([]byte, metadataLength) blobBytes := make([]byte, metadataLength)
// challenge digest preimage // challenge digest preimage
// 1 hash for metadata and 1 for each chunk // 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
challengePreimage := make([]byte, (1+MaxNumChunks)*32) challengePreimage := make([]byte, (1+MaxNumChunks+1)*32)
// the chunk data hash used for calculating the challenge preimage // the chunk data hash used for calculating the challenge preimage
var chunkDataHash common.Hash var chunkDataHash common.Hash
@@ -309,7 +302,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
// encode L2 txs into blob payload // encode L2 txs into blob payload
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx) rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
if err != nil { if err != nil {
return nil, nil, err return nil, common.Hash{}, nil, err
} }
blobBytes = append(blobBytes, rlpTxData...) blobBytes = append(blobBytes, rlpTxData...)
} }
@@ -341,9 +334,19 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
// convert raw data to BLSFieldElements // convert raw data to BLSFieldElements
blob, err := makeBlobCanonical(blobBytes) blob, err := makeBlobCanonical(blobBytes)
if err != nil { if err != nil {
return nil, nil, err return nil, common.Hash{}, nil, err
} }
// compute blob versioned hash
c, err := kzg4844.BlobToCommitment(*blob)
if err != nil {
return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
}
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
// challenge: append blob versioned hash
copy(challengePreimage[(1+MaxNumChunks)*32:], blobVersionedHash[:])
// compute z = challenge_digest % BLS_MODULUS // compute z = challenge_digest % BLS_MODULUS
challengeDigest := crypto.Keccak256Hash(challengePreimage) challengeDigest := crypto.Keccak256Hash(challengePreimage)
pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus) pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
@@ -354,7 +357,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
start := 32 - len(pointBytes) start := 32 - len(pointBytes)
copy(z[start:], pointBytes) copy(z[start:], pointBytes)
return blob, &z, nil return blob, blobVersionedHash, &z, nil
} }
// makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements. // makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
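For reference, the challenge-point derivation after this change can be summarized as below. This is a minimal, self-contained sketch, not the codec itself: the helper name computeChallengePoint and the use of golang.org/x/crypto/sha3 are illustrative assumptions, and the padding of unused chunk slots up to MaxNumChunks is omitted for brevity. It only mirrors the preimage layout and modular reduction shown in the diff (keccak256 digest, EIP-4844 BLS modulus).
package main

import (
	"fmt"
	"math/big"

	"golang.org/x/crypto/sha3"
)

// blsModulus is the EIP-4844 scalar field modulus, as used in the diff above.
var blsModulus, _ = new(big.Int).SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)

// computeChallengePoint mirrors the new preimage layout:
// keccak(metadata) || keccak(chunk_0) || ... || keccak(chunk_n) || blobVersionedHash,
// then z = keccak(preimage) mod BLS_MODULUS, left-padded to 32 bytes.
// Note: the real codec always reserves MaxNumChunks chunk slots; this sketch only
// hashes the slots passed in.
func computeChallengePoint(metadataHash [32]byte, chunkHashes [][32]byte, blobVersionedHash [32]byte) [32]byte {
	preimage := make([]byte, 0, (1+len(chunkHashes)+1)*32)
	preimage = append(preimage, metadataHash[:]...)
	for _, h := range chunkHashes {
		preimage = append(preimage, h[:]...)
	}
	preimage = append(preimage, blobVersionedHash[:]...)

	keccak := sha3.NewLegacyKeccak256()
	keccak.Write(preimage)
	point := new(big.Int).Mod(new(big.Int).SetBytes(keccak.Sum(nil)), blsModulus)

	var z [32]byte
	point.FillBytes(z[:]) // left-pad to 32 bytes, as the diff does with copy(z[start:], pointBytes)
	return z
}

func main() {
	var meta, versionedHash [32]byte
	fmt.Printf("%x\n", computeChallengePoint(meta, nil, versionedHash))
}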

View File

@@ -10,7 +10,9 @@ import (
"scroll-tech/common/types/encoding" "scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0" "scroll-tech/common/types/encoding/codecv0"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -477,55 +479,125 @@ func TestCodecV1BatchChallenge(t *testing.T) {
originalBatch := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}} originalBatch := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch, err := NewDABatch(originalBatch) batch, err := NewDABatch(originalBatch)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b36", hex.EncodeToString(batch.z[:])) assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef", hex.EncodeToString(batch.z[:]))
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json") trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}} chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}} originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch, err = NewDABatch(originalBatch) batch, err = NewDABatch(originalBatch)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c99294", hex.EncodeToString(batch.z[:])) assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e58", hex.EncodeToString(batch.z[:]))
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json") trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}} chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}} originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
batch, err = NewDABatch(originalBatch) batch, err = NewDABatch(originalBatch)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a", hex.EncodeToString(batch.z[:])) assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8", hex.EncodeToString(batch.z[:]))
trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json") trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}} chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk5}} originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk5}}
batch, err = NewDABatch(originalBatch) batch, err = NewDABatch(originalBatch)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:])) assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json") trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}} chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk6}} originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk6}}
batch, err = NewDABatch(originalBatch) batch, err = NewDABatch(originalBatch)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:])) assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json") trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}} chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk7}} originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk7}}
batch, err = NewDABatch(originalBatch) batch, err = NewDABatch(originalBatch)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:])) assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
// 15 chunks // 15 chunks
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}} originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
batch, err = NewDABatch(originalBatch) batch, err = NewDABatch(originalBatch)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2", hex.EncodeToString(batch.z[:])) assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b", hex.EncodeToString(batch.z[:]))
chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}} chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}} chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk8, chunk9}} originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk8, chunk9}}
batch, err = NewDABatch(originalBatch) batch, err = NewDABatch(originalBatch)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c43", hex.EncodeToString(batch.z[:])) assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab", hex.EncodeToString(batch.z[:]))
}
func repeat(element byte, count int) string {
result := make([]byte, 0, count)
for i := 0; i < count; i++ {
result = append(result, element)
}
return "0x" + common.Bytes2Hex(result)
}
func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
nRowsData := 126914
for _, tc := range []struct {
chunks [][]string
expectedz string
expectedy string
}{
// single empty chunk
{chunks: [][]string{{}}, expectedz: "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", expectedy: "304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd08"},
// single non-empty chunk
{chunks: [][]string{{"0x010203"}}, expectedz: "1c1d4bd5153f877d799853080aba243f2c186dd6d6064eaefacfe715c92b6354", expectedy: "24e80ed99526b0d15ba46f7ec682f517576ddae68d5131e5d351f8bae06ea7d3"},
// multiple empty chunks
{chunks: [][]string{{}, {}}, expectedz: "152c9ccfcc2884f9891f7adce2de110cf9f85bfd0e21f0933ae0636390a84d41", expectedy: "5f6f532676e25b49e2eae77513fbeca173a300b434c0a5e24fa554b68e27d582"},
// multiple non-empty chunks
{chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "62100f5381179ea7db7aa8fdedb0f7fc7b82730b75432d50ab41f80aeebe45a3", expectedy: "5b1f6e7a54907ddc06871853cf1f5d53bf2de0df7b61d0df84bc2c3fb80320cd"},
// empty chunk followed by non-empty chunk
{chunks: [][]string{{}, {"0x010203"}}, expectedz: "2d94d241c4a2a8d8f02845ca40cfba344f3b42384af2045a75c82e725a184232", expectedy: "302416c177e9e7fe40c3bc4315066c117e27d246b0a33ef68cdda6dd333c485c"},
// non-empty chunk followed by empty chunk
{chunks: [][]string{{"0x070809"}, {}}, expectedz: "7227567e3b1dbacb48a32bb85e4e99f73e4bd5620ea8cd4f5ac00a364c86af9c", expectedy: "2eb3dfd28362f35f562f779e749a555d2f1f87ddc716e95f04133d25189a391c"},
// max number of chunks all empty
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "1128ac3e22ced6af85be4335e0d03a266946a7cade8047e7fc59d6c8be642321", expectedy: "2d9b16422ce17f328fd00c99349768f0cb0c8648115eb3bd9b7864617ba88059"},
// max number of chunks all non-empty
{chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1a4025a3d74e70b511007dd55a2e252478c48054c6383285e8a176f33d99853b", expectedy: "12071ac2571c11220432a27b8be549392892e9baf4c654748ca206def3843940"},
// single chunk blob full
{chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "72714cc4a0ca75cee2d543b1f958e3d3dd59ac7df0d9d5617d8117b65295a5f2", expectedy: "4ebb690362bcbc42321309c210c99f2ebdb53b3fcf7cf3b17b78f6cfd1203ed3"},
// multiple chunks blob full
{chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "70eb5b4db503e59413238eef451871c5d12f2bb96c8b96ceca012f4ca0114727", expectedy: "568d0aaf280ec83f9c81ed2d80ecbdf199bd72dafb8a350007d37ea82997e455"},
// max number of chunks only last one non-empty not full blob
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "03db68ae16ee88489d52db19e6111b25630c5f23ad7cd14530aacf0cd231d476", expectedy: "24527d0b0e93b3dec0060c7b128975a8088b3104d3a297dc807ab43862a77a1a"},
// max number of chunks only last one non-empty full blob
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "677670193f73db499cede572bcb55677f0d2f13d690f9a820bd00bf584c3c241", expectedy: "1d85677f172dbdf4ad3094a17deeb1df4d7d2b7f35ecea44aebffa757811a268"},
// max number of chunks but last is empty
{chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "22935042dfe7df771b02c1f5cababfe508869e8f6339dabe25a8a32e37728bb0", expectedy: "48ca66fb5a094401728c3a6a517ffbd72c4d4d9a8c907e2d2f1320812f4d856f"},
} {
chunks := []*encoding.Chunk{}
for _, c := range tc.chunks {
block := &encoding.Block{Transactions: []*types.TransactionData{}}
for _, data := range c {
tx := &types.TransactionData{Type: 0xff, Data: data}
block.Transactions = append(block.Transactions, tx)
}
chunk := &encoding.Chunk{Blocks: []*encoding.Block{block}}
chunks = append(chunks, chunk)
}
b, _, z, err := constructBlobPayload(chunks)
assert.NoError(t, err)
actualZ := hex.EncodeToString(z[:])
assert.Equal(t, tc.expectedz, actualZ)
_, y, err := kzg4844.ComputeProof(*b, *z)
assert.NoError(t, err)
actualY := hex.EncodeToString(y[:])
assert.Equal(t, tc.expectedy, actualY)
}
} }
func TestCodecV1BatchBlobDataProof(t *testing.T) { func TestCodecV1BatchBlobDataProof(t *testing.T) {
@@ -536,7 +608,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
verifyData, err := batch.BlobDataProof() verifyData, err := batch.BlobDataProof()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b363d27683f7aab53cf071e2c8c8f3abfe750d206c048489450d120679cdc823f7db44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607a30dad96431f70551dd950c1426131d73ccea6d050d38dea123aad90aa8c0b734c98e8e04bd8ea8f19b415f2d85156d8", hex.EncodeToString(verifyData)) assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef423dc493f1dd7c9fbecdffa021ca4649b13e8d72231487034ec6b27e155ecfd7b44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607b38542ec811c92d86ff6f3319de06ee60c42655278ccf874f3615f450de730895276828b73db03c553b0bc7e5474a5e0", hex.EncodeToString(verifyData))
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json") trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}} chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
@@ -545,7 +617,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
verifyData, err = batch.BlobDataProof() verifyData, err = batch.BlobDataProof()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c992946dc7e51a42a31f429bc1f321dcf020b9a661225259522dba186fcfe5dc012191b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e86a0b3c76e33edb24eb07faeaa5d3f2b15a55df6ab99abf828b5803f5681dc634602eb7469ee0556563b2eccebf16ec822", hex.EncodeToString(verifyData)) assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e5821975f318babe50be728f9b52754d5ce2caa2ba82ba35b5888af1c5f28d23206b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e868e0c6fe7bd39baa5ee6339cd334a18af7c680d24e825262499e83b31633b13a9ee89813fae8441630c82bc9dce3f1e07", hex.EncodeToString(verifyData))
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json") trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}} chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
@@ -554,7 +626,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
verifyData, err = batch.BlobDataProof() verifyData, err = batch.BlobDataProof()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a24164e6ea8b7946ce5e40d2baa4f6aa0d030076f6074295288133c00e75dafa2afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d676248f5ca4a9f0d9b7fa48f5f649dc84e928161fd99ad1bd9a9879b05d29c5f718bfb3b0a696a5f3ed50b5b8c6a9d530b3ee", hex.EncodeToString(verifyData)) assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8088a01e54e3565d2e91ce6afbadf479330847d9106737875303ce17f17c48722afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d67624aee03a0f7cdb7807bc7e0b9fb20bc299af2a35e38cda816708b40f2f18db491e14a0f5d9cfe2f4c12e4ca1a219484f17", hex.EncodeToString(verifyData))
trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json") trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}} chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -563,7 +635,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
verifyData, err = batch.BlobDataProof() verifyData, err = batch.BlobDataProof()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData)) assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json") trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}} chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
@@ -572,7 +644,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
verifyData, err = batch.BlobDataProof() verifyData, err = batch.BlobDataProof()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData)) assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json") trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}} chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
@@ -581,7 +653,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
verifyData, err = batch.BlobDataProof() verifyData, err = batch.BlobDataProof()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData)) assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
// 15 chunks // 15 chunks
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}} originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
@@ -589,7 +661,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
verifyData, err = batch.BlobDataProof() verifyData, err = batch.BlobDataProof()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2613ca15d051a539e3b239027f9bdbd03bd3c66c98afafb674e2a7441912cbe099743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc93291fbc65cfa558e4df12bcde442483d31072000c56f94fe012285bc5832eaee5fe1d47f1e8655539c4500f66207d8edc6", hex.EncodeToString(verifyData)) assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b54d28f1479467d8b97fb99f5257d3e5d63a81cb2d60e3564fe6ec6066a311c119743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc932a0b603cc94be2007d4b3b17af06b4fb30caf0e574d5abcfc5654079e65154679afad75844396082a7200a4e82462aeed", hex.EncodeToString(verifyData))
chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}} chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}} chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -598,7 +670,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
verifyData, err = batch.BlobDataProof() verifyData, err = batch.BlobDataProof()
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c4330b3863672052b3d6c6552d121b0b13f97659f49bbfb6d7fed6e4b7076e4a43383bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fdae9cb71d402cfe8bc4d659f228c41f0b9d195c5074278a2346204cfaa336f5de2244a3d53e0effa2f49c81924720e84e", hex.EncodeToString(verifyData)) assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab35b73ddb4a78fc4a8540f1d8259512c46e606a701e7ef7742e38cc4562ef53b983bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fda28e5610ca6b185d6ac30b53bd83d6366fccb1956daafa90ff6b504a966b119ebb45cb3f7085b7c1d622ee1ad27fcff9", hex.EncodeToString(verifyData))
} }
func TestCodecV1BatchSkipBitmap(t *testing.T) { func TestCodecV1BatchSkipBitmap(t *testing.T) {

View File

@@ -6,6 +6,7 @@ import (
"github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil" "github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
) )
// CodecVersion defines the version of encoder and decoder. // CodecVersion defines the version of encoder and decoder.
@@ -17,8 +18,18 @@ const (
// CodecV1 represents the version 1 of the encoder and decoder. // CodecV1 represents the version 1 of the encoder and decoder.
CodecV1 CodecV1
// txTypeTest is a special transaction type used in unit tests.
txTypeTest = 0xff
) )
func init() {
// make sure txTypeTest will not interfere with other transaction types
if txTypeTest == types.LegacyTxType || txTypeTest == types.AccessListTxType || txTypeTest == types.DynamicFeeTxType || txTypeTest == types.BlobTxType || txTypeTest == types.L1MessageTxType {
log.Crit("txTypeTest is overlapping with existing transaction types")
}
}
// Block represents an L2 block. // Block represents an L2 block.
type Block struct { type Block struct {
Header *types.Header Header *types.Header
@@ -134,6 +145,10 @@ func ConvertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
S: txData.S.ToInt(), S: txData.S.ToInt(),
}) })
case txTypeTest:
// in the tests, we simply use `data` as the RLP-encoded transaction
return data, nil
case types.L1MessageTxType: // L1MessageTxType is not supported case types.L1MessageTxType: // L1MessageTxType is not supported
default: default:
return nil, fmt.Errorf("unsupported tx type: %d", txData.Type) return nil, fmt.Errorf("unsupported tx type: %d", txData.Type)

View File

@@ -0,0 +1,91 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
// HardForkName the hard fork name
HardForkName string `json:"hard_fork_name"`
}
// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey return public key from signature
func (a *AuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
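A minimal round-trip sketch of how the new AuthMsg is expected to be used, written test-style for illustration only. The test name and field values are examples, and it assumes the scroll-tech/go-ethereum and testify packages that this repository already uses in its tests.
package message

import (
	"testing"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/stretchr/testify/assert"
)

// TestAuthMsgSignVerifySketch signs an AuthMsg, verifies the signature, and
// checks that the recovered public key matches the signer's compressed key.
func TestAuthMsgSignVerifySketch(t *testing.T) {
	priv, err := crypto.GenerateKey()
	assert.NoError(t, err)

	msg := &AuthMsg{Identity: &Identity{
		ProverName:    "prover-0",
		ProverVersion: "v4.3.92",
		Challenge:     "example-challenge",
		HardForkName:  "bernoulli",
	}}
	assert.NoError(t, msg.SignWithKey(priv))

	ok, err := msg.Verify()
	assert.NoError(t, err)
	assert.True(t, ok)

	pk, err := msg.PublicKey()
	assert.NoError(t, err)
	assert.Equal(t, common.Bytes2Hex(crypto.CompressPubkey(&priv.PublicKey)), pk)
}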

View File

@@ -0,0 +1,89 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type LegacyAuthMsg struct {
// Message fields
Identity *LegacyIdentity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// LegacyIdentity contains all the fields to be signed by the prover.
type LegacyIdentity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the message of auth.
func (a *LegacyAuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey return public key from signature
func (a *LegacyAuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *LegacyIdentity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}

View File

@@ -58,26 +58,6 @@ const (
ProofTypeBatch ProofTypeBatch
) )
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
// GenerateToken generates token // GenerateToken generates token
func GenerateToken() (string, error) { func GenerateToken() (string, error) {
b := make([]byte, 16) b := make([]byte, 16)
@@ -87,65 +67,6 @@ func GenerateToken() (string, error) {
return hex.EncodeToString(b), nil return hex.EncodeToString(b), nil
} }
// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey return public key from signature
func (a *AuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
// ProofMsg is the data structure sent to the coordinator. // ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct { type ProofMsg struct {
*ProofDetail `json:"zkProof"` *ProofDetail `json:"zkProof"`

View File

@@ -54,7 +54,7 @@ func TestIdentityHash(t *testing.T) {
hash, err := identity.Hash() hash, err := identity.Hash()
assert.NoError(t, err) assert.NoError(t, err)
expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721" expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2"
assert.Equal(t, expectedHash, hex.EncodeToString(hash)) assert.Equal(t, expectedHash, hex.EncodeToString(hash))
} }

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug" "runtime/debug"
) )
var tag = "v4.3.80" var tag = "v4.3.92"
var commit = func() string { var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok { if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -5,7 +5,7 @@
"license": "MIT", "license": "MIT",
"scripts": { "scripts": {
"test:hardhat": "npx hardhat test", "test:hardhat": "npx hardhat test",
"test:forge": "forge test -vvv", "test:forge": "forge test -vvv --evm-version cancun",
"test": "yarn test:hardhat && yarn test:forge", "test": "yarn test:hardhat && yarn test:forge",
"solhint": "./node_modules/.bin/solhint -f table 'src/**/*.sol'", "solhint": "./node_modules/.bin/solhint -f table 'src/**/*.sol'",
"lint:sol": "./node_modules/.bin/prettier --write 'src/**/*.sol'", "lint:sol": "./node_modules/.bin/prettier --write 'src/**/*.sol'",

View File

@@ -0,0 +1,23 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.24;
import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";
import {Ecc} from "../../src/misc/ecc.sol";
contract DeployEcc is Script {
function run() external {
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
Ecc ecc = new Ecc();
address L2_ECC_ADDR = address(ecc);
vm.stopBroadcast();
logAddress("L2_ECC_ADDR", L2_ECC_ADDR);
}
function logAddress(string memory name, address addr) internal view {
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
}
}

View File

@@ -0,0 +1,23 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity =0.8.24;
import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";
import {Hash} from "../../src/misc/hash.sol";
contract DeployHash is Script {
function run() external {
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
Hash hash = new Hash();
address L2_HASH_ADDR = address(hash);
vm.stopBroadcast();
logAddress("L2_HASH_ADDR", L2_HASH_ADDR);
}
function logAddress(string memory name, address addr) internal view {
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
}
}

View File

@@ -92,10 +92,12 @@ contract DeployL1BridgeContracts is Script {
} }
function deployMultipleVersionRollupVerifier() internal { function deployMultipleVersionRollupVerifier() internal {
uint256[] memory _versions = new uint256[](1); uint256[] memory _versions = new uint256[](2);
address[] memory _verifiers = new address[](1); address[] memory _verifiers = new address[](2);
_versions[0] = 0; _versions[0] = 0;
_verifiers[0] = address(zkEvmVerifierV1); _verifiers[0] = address(zkEvmVerifierV1);
_versions[1] = 1;
_verifiers[1] = address(zkEvmVerifierV1);
rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers); rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);
logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier)); logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));

View File

@@ -0,0 +1,34 @@
/* eslint-disable node/no-missing-import */
import * as dotenv from "dotenv";
import { ethers } from "hardhat";
dotenv.config();
async function main() {
const [deployer] = await ethers.getSigners();
const l1ScrollMessengerAddress = process.env.L1_SCROLL_MESSENGER_PROXY_ADDR!;
const l2EccContractAddress = process.env.L2_ECC_ADDR!;
const payload = process.env.SKIPPED_TX_PAYLOAD!; // TODO: calc the payload, parse as bytes
const L1ScrollMessenger = await ethers.getContractAt("L1ScrollMessenger", l1ScrollMessengerAddress, deployer);
const tx = await L1ScrollMessenger.sendMessage(
l2EccContractAddress, // address _to
0, // uint256 _value
payload, // bytes memory _message
100000000 // uint256 _gasLimit
);
console.log(`calling ${l2EccContractAddress} with payload from l1, hash:`, tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main().catch((error) => {
console.error(error);
process.exitCode = 1;
});

View File

@@ -2,6 +2,8 @@
pragma solidity ^0.8.24; pragma solidity ^0.8.24;
/// @title IScrollChain
/// @notice The interface for ScrollChain.
interface IScrollChain { interface IScrollChain {
/********** /**********
* Events * * Events *
@@ -43,23 +45,23 @@ interface IScrollChain {
* Public View Functions * * Public View Functions *
*************************/ *************************/
/// @notice The latest finalized batch index. /// @return The latest finalized batch index.
function lastFinalizedBatchIndex() external view returns (uint256); function lastFinalizedBatchIndex() external view returns (uint256);
/// @notice Return the batch hash of a committed batch.
/// @param batchIndex The index of the batch. /// @param batchIndex The index of the batch.
/// @return The batch hash of a committed batch.
function committedBatches(uint256 batchIndex) external view returns (bytes32); function committedBatches(uint256 batchIndex) external view returns (bytes32);
/// @notice Return the state root of a committed batch.
/// @param batchIndex The index of the batch. /// @param batchIndex The index of the batch.
/// @return The state root of a committed batch.
function finalizedStateRoots(uint256 batchIndex) external view returns (bytes32); function finalizedStateRoots(uint256 batchIndex) external view returns (bytes32);
/// @notice Return the message root of a committed batch.
/// @param batchIndex The index of the batch. /// @param batchIndex The index of the batch.
/// @return The message root of a committed batch.
function withdrawRoots(uint256 batchIndex) external view returns (bytes32); function withdrawRoots(uint256 batchIndex) external view returns (bytes32);
/// @notice Return whether the batch is finalized by batch index.
/// @param batchIndex The index of the batch. /// @param batchIndex The index of the batch.
/// @return Whether the batch is finalized by batch index.
function isBatchFinalized(uint256 batchIndex) external view returns (bool); function isBatchFinalized(uint256 batchIndex) external view returns (bool);
/***************************** /*****************************

View File

@@ -8,6 +8,8 @@ import {IScrollChain} from "./IScrollChain.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol"; import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol"; import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";
/// @title MultipleVersionRollupVerifier
/// @notice Verifies aggregate zk proofs using the appropriate verifier.
contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable { contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/********** /**********
* Events * * Events *
@@ -37,7 +39,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
*************/ *************/
/// @notice The address of ScrollChain contract. /// @notice The address of ScrollChain contract.
address immutable scrollChain; address public immutable scrollChain;
/*********** /***********
* Structs * * Structs *
@@ -58,7 +60,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/// The verifiers are sorted by batchIndex in increasing order. /// The verifiers are sorted by batchIndex in increasing order.
mapping(uint256 => Verifier[]) public legacyVerifiers; mapping(uint256 => Verifier[]) public legacyVerifiers;
/// @notice Mapping from verifier version to the lastest used zkevm verifier. /// @notice Mapping from verifier version to the latest used zkevm verifier.
mapping(uint256 => Verifier) public latestVerifier; mapping(uint256 => Verifier) public latestVerifier;
/*************** /***************
@@ -86,6 +88,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
*************************/ *************************/
/// @notice Return the number of legacy verifiers. /// @notice Return the number of legacy verifiers.
/// @param _version The version of legacy verifiers.
/// @return The number of legacy verifiers.
function legacyVerifiersLength(uint256 _version) external view returns (uint256) { function legacyVerifiersLength(uint256 _version) external view returns (uint256) {
return legacyVerifiers[_version].length; return legacyVerifiers[_version].length;
} }
@@ -93,6 +97,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/// @notice Compute the verifier should be used for specific batch. /// @notice Compute the verifier should be used for specific batch.
/// @param _version The version of verifier to query. /// @param _version The version of verifier to query.
/// @param _batchIndex The batch index to query. /// @param _batchIndex The batch index to query.
/// @return The address of verifier.
function getVerifier(uint256 _version, uint256 _batchIndex) public view returns (address) { function getVerifier(uint256 _version, uint256 _batchIndex) public view returns (address) {
// Normally, we will use the latest verifier. // Normally, we will use the latest verifier.
Verifier memory _verifier = latestVerifier[_version]; Verifier memory _verifier = latestVerifier[_version];
@@ -144,6 +149,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
************************/ ************************/
/// @notice Update the address of zkevm verifier. /// @notice Update the address of zkevm verifier.
/// @param _version The version of the verifier.
/// @param _startBatchIndex The start batch index when the verifier will be used. /// @param _startBatchIndex The start batch index when the verifier will be used.
/// @param _verifier The address of new verifier. /// @param _verifier The address of new verifier.
function updateVerifier( function updateVerifier(

View File

@@ -115,11 +115,11 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
*************/ *************/
/// @dev Address of the point evaluation precompile used for EIP-4844 blob verification. /// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A); address private constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
/// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
/// point evaluation precompile /// point evaluation precompile
uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513; uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
/// @notice The chain id of the corresponding layer 2 chain. /// @notice The chain id of the corresponding layer 2 chain.
uint64 public immutable layer2ChainId; uint64 public immutable layer2ChainId;
@@ -236,6 +236,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
*****************************/ *****************************/
/// @notice Import layer 2 genesis block /// @notice Import layer 2 genesis block
/// @param _batchHeader The header of the genesis batch.
/// @param _stateRoot The state root of the genesis block.
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external { function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
// check genesis batch header length // check genesis batch header length
if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero(); if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();
@@ -475,7 +477,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
_postStateRoot, _postStateRoot,
_withdrawRoot, _withdrawRoot,
_dataHash, _dataHash,
_blobDataProof[0:64] _blobDataProof[0:64],
_blobVersionedHash
) )
); );
@@ -877,7 +880,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr); uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr);
if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs(); if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
unchecked { unchecked {
_totalTransactionsInChunk += dataPtr - startPtr; // number of non-skipped l1 messages _totalTransactionsInChunk += (dataPtr - startPtr) / 32; // number of non-skipped l1 messages
_totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs _totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs
_totalL1MessagesPoppedInBatch += _numL1MessagesInBlock; _totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
_totalL1MessagesPoppedOverall += _numL1MessagesInBlock; _totalL1MessagesPoppedOverall += _numL1MessagesInBlock;

View File

@@ -2,6 +2,8 @@
pragma solidity ^0.8.24; pragma solidity ^0.8.24;
/// @title IRollupVerifier
/// @notice The interface for rollup verifier.
interface IRollupVerifier { interface IRollupVerifier {
/// @notice Verify aggregate zk proof. /// @notice Verify aggregate zk proof.
/// @param batchIndex The batch index to verify. /// @param batchIndex The batch index to verify.

View File

@@ -199,7 +199,7 @@ library PatriciaMerkleTrieVerifier {
} }
// decodes all RLP encoded data and stores their DATA items // decodes all RLP encoded data and stores their DATA items
// [length - 128 bits | calldata offset - 128 bits] in a continous memory region. // [length - 128 bits | calldata offset - 128 bits] in a continuous memory region.
// Expects that the RLP starts with a list that defines the length // Expects that the RLP starts with a list that defines the length
// of the whole RLP region. // of the whole RLP region.
function decodeFlat(_ptr) -> ptr, memStart, nItems, hash { function decodeFlat(_ptr) -> ptr, memStart, nItems, hash {
@@ -505,7 +505,7 @@ library PatriciaMerkleTrieVerifier {
} }
// the one and only boundary check // the one and only boundary check
// in case an attacker crafted a malicous payload // in case an attacker crafted a malicious payload
// and succeeds in the prior verification steps // and succeeds in the prior verification steps
// then this should catch any bogus accesses // then this should catch any bogus accesses
if iszero(eq(ptr, add(proof.offset, proof.length))) { if iszero(eq(ptr, add(proof.offset, proof.length))) {

contracts/src/misc/ecc.sol
View File

@@ -0,0 +1,127 @@
// SPDX-License-Identifier: GPL-3.0
pragma solidity =0.8.24;
contract Ecc {
/* ECC Functions */
// https://etherscan.io/address/0x41bf00f080ed41fa86201eac56b8afb170d9e36d#code
function ecAdd(uint256[2] memory p0, uint256[2] memory p1) public view returns (uint256[2] memory retP) {
uint256[4] memory i = [p0[0], p0[1], p1[0], p1[1]];
assembly {
// call ecadd precompile
// inputs are: x1, y1, x2, y2
if iszero(staticcall(not(0), 0x06, i, 0x80, retP, 0x40)) {
revert(0, 0)
}
}
}
// https://etherscan.io/address/0x41bf00f080ed41fa86201eac56b8afb170d9e36d#code
function ecMul(uint256[2] memory p, uint256 s) public view returns (uint256[2] memory retP) {
// With a public key (x, y), this computes p = scalar * (x, y).
uint256[3] memory i = [p[0], p[1], s];
assembly {
// call ecmul precompile
// inputs are: x, y, scalar
if iszero(staticcall(not(0), 0x07, i, 0x60, retP, 0x40)) {
revert(0, 0)
}
}
}
// scroll-tech/scroll/contracts/src/libraries/verifier/RollupVerifier.sol
struct G1Point {
uint256 x;
uint256 y;
}
struct G2Point {
uint256[2] x;
uint256[2] y;
}
function ecPairing(G1Point[] memory p1, G2Point[] memory p2) internal view returns (bool) {
uint256 length = p1.length * 6;
uint256[] memory input = new uint256[](length);
uint256[1] memory result;
bool ret;
require(p1.length == p2.length);
for (uint256 i = 0; i < p1.length; i++) {
input[0 + i * 6] = p1[i].x;
input[1 + i * 6] = p1[i].y;
input[2 + i * 6] = p2[i].x[0];
input[3 + i * 6] = p2[i].x[1];
input[4 + i * 6] = p2[i].y[0];
input[5 + i * 6] = p2[i].y[1];
}
assembly {
ret := staticcall(gas(), 8, add(input, 0x20), mul(length, 0x20), result, 0x20)
}
require(ret);
return result[0] != 0;
}
/* Bench */
function ecAdds(uint256 n) public {
uint256[2] memory p0;
p0[0] = 1;
p0[1] = 2;
uint256[2] memory p1;
p1[0] = 1;
p1[1] = 2;
for (uint256 i = 0; i < n; i++) {
ecAdd(p0, p1);
}
}
function ecMuls(uint256 n) public {
uint256[2] memory p0;
p0[0] = 1;
p0[1] = 2;
for (uint256 i = 0; i < n; i++) {
ecMul(p0, 3);
}
}
function ecPairings(uint256 n) public {
G1Point[] memory g1_points = new G1Point[](2);
G2Point[] memory g2_points = new G2Point[](2);
g1_points[0].x = 0x0000000000000000000000000000000000000000000000000000000000000001;
g1_points[0].y = 0x0000000000000000000000000000000000000000000000000000000000000002;
g2_points[0].x[1] = 0x1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed;
g2_points[0].x[0] = 0x198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2;
g2_points[0].y[1] = 0x12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa;
g2_points[0].y[0] = 0x090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b;
g1_points[1].x = 0x1aa125a22bd902874034e67868aed40267e5575d5919677987e3bc6dd42a32fe;
g1_points[1].y = 0x1bacc186725464068956d9a191455c2d6f6db282d83645c610510d8d4efbaee0;
g2_points[1].x[1] = 0x1b7734c80605f71f1e2de61e998ce5854ff2abebb76537c3d67e50d71422a852;
g2_points[1].x[0] = 0x10d5a1e34b2388a5ebe266033a5e0e63c89084203784da0c6bd9b052a78a2cac;
g2_points[1].y[1] = 0x275739c5c2cdbc72e37c689e2ab441ea76c1d284b9c46ae8f5c42ead937819e1;
g2_points[1].y[0] = 0x018de34c5b7c3d3d75428bbe050f1449ea3d9961d563291f307a1874f7332e65;
for (uint256 i = 0; i < n; i++) {
ecPairing(g1_points, g2_points);
// bool checked = false;
// checked = ecPairing(g1_points, g2_points);
// require(checked);
}
}
// https://github.com/OpenZeppelin/openzeppelin-contracts/blob/8a0b7bed82d6b8053872c3fd40703efd58f5699d/test/utils/cryptography/ECDSA.test.js#L230
function ecRecovers(uint256 n) public {
bytes32 hash = 0xb94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9;
bytes32 r = 0xe742ff452d41413616a5bf43fe15dd88294e983d3d36206c2712f39083d638bd;
uint8 v = 0x1b;
bytes32 s = 0xe0a0fc89be718fbc1033e1d30d78be1c68081562ed2e97af876f286f3453231d;
for (uint256 i = 0; i < n; i++) {
ecrecover(hash, v, r, s);
}
}
}

View File

@@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-3.0
pragma solidity =0.8.24;
contract Hash {
function sha256(bytes memory input) public view returns (bytes memory out) {
(bool ok, bytes memory out) = address(2).staticcall(input);
require(ok);
}
function sha256Yul(bytes memory input) public view returns (bytes memory out) {
assembly {
// mstore(0, input)
if iszero(staticcall(gas(), 2, 0, 32, 0, 32)) {
revert(0, 0)
}
// return(0, 32)
}
}
function sha256s(bytes memory input, uint256 n) public {
for (uint256 i = 0; i < n; i++) {
sha256(input);
}
}
function keccak256s(uint256 n) public {
bytes32[] memory output = new bytes32[](n);
for (uint256 i = 0; i < n; i++) {
bytes memory input = abi.encode(i);
output[i] = keccak256(input);
}
}
}

View File

@@ -83,6 +83,8 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
} }
function testTransferUSDCRoles(address owner) external { function testTransferUSDCRoles(address owner) external {
hevm.assume(owner != address(0));
// non-whitelisted caller call, should revert // non-whitelisted caller call, should revert
hevm.expectRevert("only circle caller"); hevm.expectRevert("only circle caller");
gateway.transferUSDCRoles(owner); gateway.transferUSDCRoles(owner);

View File

@@ -12,11 +12,11 @@ import (
"github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/params"
coordinatorConfig "scroll-tech/coordinator/internal/config"
"scroll-tech/common/cmd" "scroll-tech/common/cmd"
"scroll-tech/common/docker" "scroll-tech/common/testcontainers"
"scroll-tech/common/utils" "scroll-tech/common/utils"
coordinatorConfig "scroll-tech/coordinator/internal/config"
) )
var ( var (
@@ -28,7 +28,7 @@ type CoordinatorApp struct {
Config *coordinatorConfig.Config Config *coordinatorConfig.Config
ChainConfig *params.ChainConfig ChainConfig *params.ChainConfig
base *docker.App testApps *testcontainers.TestcontainerApps
configOriginFile string configOriginFile string
chainConfigOriginFile string chainConfigOriginFile string
@@ -37,17 +37,17 @@ type CoordinatorApp struct {
HTTPPort int64 HTTPPort int64
args []string args []string
docker.AppAPI *cmd.Cmd
} }
// NewCoordinatorApp return a new coordinatorApp manager. // NewCoordinatorApp return a new coordinatorApp manager.
func NewCoordinatorApp(base *docker.App, configFile string, chainConfigFile string) *CoordinatorApp { func NewCoordinatorApp(testApps *testcontainers.TestcontainerApps, configFile string, chainConfigFile string) *CoordinatorApp {
coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", base.Timestamp) coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", testApps.Timestamp)
genesisFile := fmt.Sprintf("/tmp/%d_genesis.json", base.Timestamp) genesisFile := fmt.Sprintf("/tmp/%d_genesis.json", testApps.Timestamp)
port, _ := rand.Int(rand.Reader, big.NewInt(2000)) port, _ := rand.Int(rand.Reader, big.NewInt(2000))
httpPort := port.Int64() + httpStartPort httpPort := port.Int64() + httpStartPort
coordinatorApp := &CoordinatorApp{ coordinatorApp := &CoordinatorApp{
base: base, testApps: testApps,
configOriginFile: configFile, configOriginFile: configFile,
chainConfigOriginFile: chainConfigFile, chainConfigOriginFile: chainConfigFile,
coordinatorFile: coordinatorFile, coordinatorFile: coordinatorFile,
@@ -63,14 +63,14 @@ func NewCoordinatorApp(base *docker.App, configFile string, chainConfigFile stri
// RunApp run coordinator-test child process by multi parameters. // RunApp run coordinator-test child process by multi parameters.
func (c *CoordinatorApp) RunApp(t *testing.T, args ...string) { func (c *CoordinatorApp) RunApp(t *testing.T, args ...string) {
c.AppAPI = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...) c.Cmd = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator api successfully") }) c.Cmd.RunApp(func() bool { return c.Cmd.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
} }
// Free stop and release coordinator-test. // Free stop and release coordinator-test.
func (c *CoordinatorApp) Free() { func (c *CoordinatorApp) Free() {
if !utils.IsNil(c.AppAPI) { if !utils.IsNil(c.Cmd) {
c.AppAPI.WaitExit() c.Cmd.WaitExit()
} }
_ = os.Remove(c.coordinatorFile) _ = os.Remove(c.coordinatorFile)
} }
@@ -82,7 +82,6 @@ func (c *CoordinatorApp) HTTPEndpoint() string {
// MockConfig creates a new coordinator config. // MockConfig creates a new coordinator config.
func (c *CoordinatorApp) MockConfig(store bool) error { func (c *CoordinatorApp) MockConfig(store bool) error {
base := c.base
cfg, err := coordinatorConfig.NewConfig(c.configOriginFile) cfg, err := coordinatorConfig.NewConfig(c.configOriginFile)
if err != nil { if err != nil {
return err return err
@@ -97,7 +96,11 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
MaxVerifierWorkers: 4, MaxVerifierWorkers: 4,
MinProverVersion: "v1.0.0", MinProverVersion: "v1.0.0",
} }
cfg.DB.DSN = base.DBImg.Endpoint() endpoint, err := c.testApps.GetDBEndPoint()
if err != nil {
return err
}
cfg.DB.DSN = endpoint
cfg.L2.ChainID = 111 cfg.L2.ChainID = 111
cfg.Auth.ChallengeExpireDurationSec = 1 cfg.Auth.ChallengeExpireDurationSec = 1
cfg.Auth.LoginExpireDurationSec = 1 cfg.Auth.LoginExpireDurationSec = 1

View File

@@ -5,6 +5,7 @@
"batch_collection_time_sec": 180, "batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180, "chunk_collection_time_sec": 180,
"verifier": { "verifier": {
"fork_name": "bernoulli",
"mock_mode": true, "mock_mode": true,
"params_path": "", "params_path": "",
"assets_path": "" "assets_path": ""

View File

@@ -50,6 +50,7 @@ type Config struct {
// VerifierConfig load zk verifier config. // VerifierConfig load zk verifier config.
type VerifierConfig struct { type VerifierConfig struct {
ForkName string `json:"fork_name"`
MockMode bool `json:"mock_mode"` MockMode bool `json:"mock_mode"`
ParamsPath string `json:"params_path"` ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"` AssetsPath string `json:"assets_path"`

View File

@@ -53,25 +53,44 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
         return jwt.MapClaims{}
     }
-    // recover the public key
-    authMsg := message.AuthMsg{
-        Identity: &message.Identity{
-            Challenge: v.Message.Challenge,
-            ProverName: v.Message.ProverName,
-            ProverVersion: v.Message.ProverVersion,
-        },
-        Signature: v.Signature,
+    var publicKey string
+    var err error
+    if v.Message.HardForkName != "" {
+        authMsg := message.AuthMsg{
+            Identity: &message.Identity{
+                Challenge: v.Message.Challenge,
+                ProverName: v.Message.ProverName,
+                ProverVersion: v.Message.ProverVersion,
+                HardForkName: v.Message.HardForkName,
+            },
+            Signature: v.Signature,
+        }
+        publicKey, err = authMsg.PublicKey()
+    } else {
+        authMsg := message.LegacyAuthMsg{
+            Identity: &message.LegacyIdentity{
+                Challenge: v.Message.Challenge,
+                ProverName: v.Message.ProverName,
+                ProverVersion: v.Message.ProverVersion,
+            },
+            Signature: v.Signature,
+        }
+        publicKey, err = authMsg.PublicKey()
     }
-    publicKey, err := authMsg.PublicKey()
     if err != nil {
         return jwt.MapClaims{}
     }
+    if v.Message.HardForkName == "" {
+        v.Message.HardForkName = "shanghai"
+    }
     return jwt.MapClaims{
         types.PublicKey: publicKey,
         types.ProverName: v.Message.ProverName,
         types.ProverVersion: v.Message.ProverVersion,
+        types.HardForkName: v.Message.HardForkName,
     }
 }
@@ -89,5 +108,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
     if proverVersion, ok := claims[types.ProverVersion]; ok {
         c.Set(types.ProverVersion, proverVersion)
     }
+    if hardForkName, ok := claims[types.HardForkName]; ok {
+        c.Set(types.HardForkName, hardForkName)
+    }
     return nil
 }
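For reference, the login flow above reduces to one rule: a prover that sends hard_fork_name is verified against the new, fork-aware AuthMsg, a legacy prover is verified against LegacyAuthMsg, and an empty fork name is normalized to "shanghai" before the JWT claims are built. A minimal, standalone sketch of that normalization (LoginMessage and claimsFromLogin are hypothetical stand-ins, not types from this repository):

package main

import "fmt"

// LoginMessage is a hypothetical stand-in for the coordinator's login payload.
type LoginMessage struct {
	Challenge     string
	ProverName    string
	ProverVersion string
	HardForkName  string // empty for legacy provers that predate the field
}

// claimsFromLogin mirrors the defaulting rule in PayloadFunc: a missing fork
// name is treated as the pre-Bernoulli fork "shanghai".
func claimsFromLogin(msg LoginMessage) map[string]string {
	forkName := msg.HardForkName
	if forkName == "" {
		forkName = "shanghai"
	}
	return map[string]string{
		"prover_name":    msg.ProverName,
		"prover_version": msg.ProverVersion,
		"hard_fork_name": forkName,
	}
}

func main() {
	legacy := LoginMessage{ProverName: "prover-1", ProverVersion: "v4.1.98"}
	forkAware := LoginMessage{ProverName: "prover-2", ProverVersion: "v4.2.0", HardForkName: "bernoulli"}
	fmt.Println(claimsFromLogin(legacy))    // hard_fork_name defaults to "shanghai"
	fmt.Println(claimsFromLogin(forkAware)) // hard_fork_name stays "bernoulli"
}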

View File

@@ -2,6 +2,7 @@ package api
 import (
     "github.com/prometheus/client_golang/prometheus"
+    "github.com/scroll-tech/go-ethereum/log"
     "github.com/scroll-tech/go-ethereum/params"
     "gorm.io/gorm"
@@ -25,6 +26,8 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
         panic("proof receiver new verifier failure")
     }
+    log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)
     Auth = NewAuthController(db)
     GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
     SubmitProof = NewSubmitProofController(cfg, db, vf, reg)

View File

@@ -6,6 +6,8 @@ import (
     "github.com/gin-gonic/gin"
     "github.com/prometheus/client_golang/prometheus"
+    "github.com/prometheus/client_golang/prometheus/promauto"
+    "github.com/scroll-tech/go-ethereum/log"
     "github.com/scroll-tech/go-ethereum/params"
     "gorm.io/gorm"
@@ -21,15 +23,21 @@ import (
 // GetTaskController the get prover task api controller
 type GetTaskController struct {
     proverTasks map[message.ProofType]provertask.ProverTask
+    getTaskAccessCounter *prometheus.CounterVec
 }
 // NewGetTaskController create a get prover task controller
 func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
-    chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVK, reg)
-    batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVK, reg)
+    chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg)
+    batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg)
     ptc := &GetTaskController{
         proverTasks: make(map[message.ProofType]provertask.ProverTask),
+        getTaskAccessCounter: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+            Name: "coordinator_get_task_access_count",
+            Help: "Multi dimensions get task counter.",
+        }, []string{coordinatorType.LabelProverName, coordinatorType.LabelProverPublicKey, coordinatorType.LabelProverVersion}),
     }
     ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
@@ -38,6 +46,28 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
     return ptc
 }
+func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
+    publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
+    if !publicKeyExist {
+        return fmt.Errorf("get public key from context failed")
+    }
+    proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
+    if !proverNameExist {
+        return fmt.Errorf("get prover name from context failed")
+    }
+    proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
+    if !proverVersionExist {
+        return fmt.Errorf("get prover version from context failed")
+    }
+    ptc.getTaskAccessCounter.With(prometheus.Labels{
+        coordinatorType.LabelProverPublicKey: publicKey.(string),
+        coordinatorType.LabelProverName: proverName.(string),
+        coordinatorType.LabelProverVersion: proverVersion.(string),
+    }).Inc()
+    return nil
+}
 // GetTasks get assigned chunk/batch task
 func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
     var getTaskParameter coordinatorType.GetTaskParameter
@@ -55,6 +85,10 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
         return
     }
+    if err := ptc.incGetTaskAccessCounter(ctx); err != nil {
+        log.Warn("get_task access counter inc failed", "error", err.Error())
+    }
     result, err := proverTask.Assign(ctx, &getTaskParameter)
     if err != nil {
         nerr := fmt.Errorf("return prover task err:%w", err)
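The new getTaskAccessCounter is a plain Prometheus CounterVec keyed by prover name, public key and version, incremented from values the JWT middleware has already stored in the gin context. A self-contained sketch of the same pattern (the context key strings and registry wiring are assumptions mirroring the coordinatorType constants, not the coordinator's exact setup):

package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// accessCounter mirrors coordinator_get_task_access_count: one time series per
// (prover_name, prover_pubkey, prover_version) combination.
var accessCounter = promauto.With(prometheus.NewRegistry()).NewCounterVec(prometheus.CounterOpts{
	Name: "coordinator_get_task_access_count",
	Help: "Multi dimensions get task counter.",
}, []string{"prover_name", "prover_pubkey", "prover_version"})

// incAccessCounter reads the prover identity that the auth middleware stored in
// the gin context and bumps the counter; a missing key aborts the increment.
func incAccessCounter(ctx *gin.Context) error {
	labels := prometheus.Labels{}
	for ctxKey, labelName := range map[string]string{
		"public_key":     "prover_pubkey", // assumed context keys, standing in for the coordinatorType constants
		"prover_name":    "prover_name",
		"prover_version": "prover_version",
	} {
		val, ok := ctx.Get(ctxKey)
		if !ok {
			return fmt.Errorf("get %s from context failed", ctxKey)
		}
		labels[labelName] = val.(string) // the middleware stores these values as strings
	}
	accessCounter.With(labels).Inc()
	return nil
}

func main() {
	c, _ := gin.CreateTestContext(httptest.NewRecorder())
	c.Set("public_key", "0xabc")
	c.Set("prover_name", "prover-1")
	c.Set("prover_version", "v4.2.0")
	if err := incAccessCounter(c); err != nil {
		fmt.Println("inc failed:", err)
	}
}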

View File

@@ -31,16 +31,17 @@ type BatchProverTask struct {
     batchAttemptsExceedTotal prometheus.Counter
     batchTaskGetTaskTotal *prometheus.CounterVec
+    batchTaskGetTaskProver *prometheus.CounterVec
 }
 // NewBatchProverTask new a batch collector
-func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
+func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask {
     forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
     log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
     bp := &BatchProverTask{
         BaseProverTask: BaseProverTask{
-            vk: vk,
+            vkMap: vkMap,
             db: db,
             cfg: cfg,
             nameForkMap: nameForkMap,
@@ -58,6 +59,7 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
             Name: "coordinator_batch_get_task_total",
             Help: "Total number of batch get task.",
         }, []string{"fork_name"}),
+        batchTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "batch"),
     }
     return bp
 }
@@ -69,9 +71,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
         return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
     }
-    hardForkNumber, err := bp.getHardForkNumberByName(getTaskParameter.HardForkName)
+    hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
     if err != nil {
-        log.Error("batch assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
+        log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
         return nil, err
     }
@@ -83,7 +85,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
     if fromBlockNum != 0 {
         startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, fromBlockNum)
         if chunkErr != nil {
-            log.Error("failed to get fork start chunk index", "forkName", getTaskParameter.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
+            log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
             return nil, ErrCoordinatorInternalFailure
         }
         if startChunk == nil {
@@ -93,8 +95,8 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
     }
     if toBlockNum != math.MaxInt64 {
         toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, toBlockNum)
-        if err != nil {
-            log.Error("failed to get fork end chunk index", "forkName", getTaskParameter.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
+        if chunkErr != nil {
+            log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
             return nil, ErrCoordinatorInternalFailure
         }
         if toChunk != nil {
@@ -179,7 +181,12 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
         return nil, ErrCoordinatorInternalFailure
     }
-    bp.batchTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
+    bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
+    bp.batchTaskGetTaskProver.With(prometheus.Labels{
+        coordinatorType.LabelProverName: proverTask.ProverName,
+        coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
+        coordinatorType.LabelProverVersion: proverTask.ProverVersion,
+    }).Inc()
     return taskMsg, nil
 }
@@ -209,6 +216,9 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
         DataHash: common.HexToHash(chunk.Hash),
         IsPadding: false,
     }
+    if proof.ChunkInfo != nil {
+        chunkInfo.TxBytes = proof.ChunkInfo.TxBytes
+    }
     chunkInfos = append(chunkInfos, &chunkInfo)
 }

View File

@@ -29,15 +29,16 @@ type ChunkProverTask struct {
     chunkAttemptsExceedTotal prometheus.Counter
     chunkTaskGetTaskTotal *prometheus.CounterVec
+    chunkTaskGetTaskProver *prometheus.CounterVec
 }
 // NewChunkProverTask new a chunk prover task
-func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
+func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask {
     forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
     log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
     cp := &ChunkProverTask{
         BaseProverTask: BaseProverTask{
-            vk: vk,
+            vkMap: vkMap,
             db: db,
             cfg: cfg,
             nameForkMap: nameForkMap,
@@ -55,6 +56,7 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
             Name: "coordinator_chunk_get_task_total",
             Help: "Total number of chunk get task.",
         }, []string{"fork_name"}),
+        chunkTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "chunk"),
     }
     return cp
 }
@@ -66,9 +68,9 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
         return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
     }
-    hardForkNumber, err := cp.getHardForkNumberByName(getTaskParameter.HardForkName)
+    hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
     if err != nil {
-        log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
+        log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
         return nil, err
     }
@@ -151,7 +153,12 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
         return nil, ErrCoordinatorInternalFailure
     }
-    cp.chunkTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
+    cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
+    cp.chunkTaskGetTaskProver.With(prometheus.Labels{
+        coordinatorType.LabelProverName: proverTask.ProverName,
+        coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
+        coordinatorType.LabelProverVersion: proverTask.ProverVersion,
+    }).Inc()
     return taskMsg, nil
 }

View File

@@ -2,8 +2,12 @@ package provertask
 import (
     "fmt"
+    "sync"
     "github.com/gin-gonic/gin"
+    "github.com/prometheus/client_golang/prometheus"
+    "github.com/prometheus/client_golang/prometheus/promauto"
+    "github.com/scroll-tech/go-ethereum/log"
     "gorm.io/gorm"
     "scroll-tech/common/version"
@@ -13,11 +17,12 @@ import (
     coordinatorType "scroll-tech/coordinator/internal/types"
 )
-// ErrCoordinatorInternalFailure coordinator internal db failure
-var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
-// ErrHardForkName indicates client request with the wrong hard fork name
-var ErrHardForkName = fmt.Errorf("wrong hard fork name")
+var (
+    // ErrCoordinatorInternalFailure coordinator internal db failure
+    ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
+    // ErrHardForkName indicates client request with the wrong hard fork name
+    ErrHardForkName = fmt.Errorf("wrong hard fork name")
+)
 // ProverTask the interface of a collector who send data to prover
 type ProverTask interface {
@@ -28,8 +33,8 @@ type ProverTask interface {
 type BaseProverTask struct {
     cfg *config.Config
     db *gorm.DB
-    vk string
+    vkMap map[string]string
     nameForkMap map[string]uint64
     forkHeights []uint64
@@ -44,6 +49,7 @@ type proverTaskContext struct {
     PublicKey string
     ProverName string
     ProverVersion string
+    HardForkName string
 }
 // checkParameter check the prover task parameter illegal
@@ -68,12 +74,24 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
     }
     ptc.ProverVersion = proverVersion.(string)
+    hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
+    if !hardForkNameExist {
+        return nil, fmt.Errorf("get hard fork name from context failed")
+    }
+    ptc.HardForkName = hardForkName.(string)
     if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
         return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
     }
+    vk, vkExist := b.vkMap[ptc.HardForkName]
+    if !vkExist {
+        return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
+    }
     // if the prover has a different vk
-    if getTaskParameter.VK != b.vk {
+    if getTaskParameter.VK != vk {
+        log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
         // if the prover reports a different prover version
         if !version.CheckScrollProverVersion(proverVersion.(string)) {
             return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
@@ -115,3 +133,22 @@ func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error
     return hardForkNumber, nil
 }
+var (
+    getTaskCounterInitOnce sync.Once
+    getTaskCounterVec *prometheus.CounterVec = nil
+)
+func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
+    getTaskCounterInitOnce.Do(func() {
+        getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{
+            Name: "coordinator_get_task_count",
+            Help: "Multi dimensions get task counter.",
+        }, []string{"task_type",
+            coordinatorType.LabelProverName,
+            coordinatorType.LabelProverPublicKey,
+            coordinatorType.LabelProverVersion})
+    })
+    return getTaskCounterVec.MustCurryWith(prometheus.Labels{"task_type": taskType})
+}
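newGetTaskCounterVec above is shared by the chunk and batch prover tasks: the underlying CounterVec is registered exactly once via sync.Once, and each caller receives a view with the task_type label pre-bound through MustCurryWith. A runnable sketch of that pattern with the label names spelled out (registry wiring simplified for illustration):

package main

import (
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	getTaskCounterOnce sync.Once
	getTaskCounterVec  *prometheus.CounterVec
)

// newGetTaskCounterVec registers coordinator_get_task_count exactly once so the
// chunk and batch prover tasks can share it, then curries the task_type label
// so each caller only supplies the per-prover labels.
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
	getTaskCounterOnce.Do(func() {
		getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{
			Name: "coordinator_get_task_count",
			Help: "Multi dimensions get task counter.",
		}, []string{"task_type", "prover_name", "prover_pubkey", "prover_version"})
	})
	return getTaskCounterVec.MustCurryWith(prometheus.Labels{"task_type": taskType})
}

func main() {
	reg := prometheus.NewRegistry()
	chunkCounter := newGetTaskCounterVec(promauto.With(reg), "chunk")
	batchCounter := newGetTaskCounterVec(promauto.With(reg), "batch")

	labels := prometheus.Labels{"prover_name": "prover-1", "prover_pubkey": "0xabc", "prover_version": "v4.2.0"}
	chunkCounter.With(labels).Inc() // recorded with task_type="chunk"
	batchCounter.With(labels).Inc() // recorded with task_type="batch"
}

Registering once and currying avoids a duplicate-registration panic while still separating chunk and batch counts by label.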

View File

@@ -134,6 +134,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
     if len(pv) == 0 {
         return fmt.Errorf("get ProverVersion from context failed")
     }
+    hardForkName := ctx.GetString(coordinatorType.HardForkName)
     var proverTask *orm.ProverTask
     var err error
@@ -156,20 +157,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
     proofTimeSec := uint64(proofTime.Seconds())
     log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
-        "proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
+        "proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)
-    if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
+    if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
         return err
     }
     m.verifierTotal.WithLabelValues(pv).Inc()
-    var success bool
+    success := true
     var verifyErr error
-    if proofMsg.Type == message.ProofTypeChunk {
-        success, verifyErr = m.verifier.VerifyChunkProof(proofMsg.ChunkProof)
-    } else if proofMsg.Type == message.ProofTypeBatch {
-        success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof)
+    // only verify batch proof. chunk proof verifier have been disabled after Bernoulli
+    if proofMsg.Type == message.ProofTypeBatch {
+        success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName)
     }
     if verifyErr != nil || !success {
@@ -178,7 +178,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
         m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
         log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
-            "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
+            "prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
         if verifyErr != nil {
             return ErrValidatorFailureVerifiedFailed
@@ -189,7 +189,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
     m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())
     log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
-        "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
+        "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)
     if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
         m.proofSubmitFailure.Inc()
@@ -221,7 +221,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
     return nil
 }
-func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) {
+func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) {
     defer func() {
         if err != nil {
             m.validateFailureTotal.Inc()
@@ -240,7 +240,7 @@
             "cannot submit valid proof for a prover task twice",
             "taskType", proverTask.TaskType, "hash", proofMsg.ID,
             "proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
-            "proverPublicKey", proverTask.ProverPublicKey,
+            "proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName,
         )
         return ErrValidatorFailureProverTaskCannotSubmitTwice
     }
@@ -259,7 +259,7 @@
         log.Info("proof generated by prover failed",
             "taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
             "proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
-            "failureMessage", failureMsg)
+            "failureMessage", failureMsg, "forkName", forkName)
         return ErrValidatorFailureProofMsgStatusNotOk
     }
@@ -267,13 +267,13 @@
     if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
         m.validateFailureProverTaskTimeout.Inc()
         log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
-            "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
+            "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName)
         return ErrValidatorFailureProofTimeout
     }
     // store the proof to prover task
     if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
-        log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
+        log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName,
             "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
     }
@@ -281,7 +281,7 @@
     if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
         m.validateFailureProverTaskHaveVerifier.Inc()
         log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
-            "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
+            "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName)
         return ErrValidatorFailureTaskHaveVerifiedSuccess
     }
     return nil

View File

@@ -9,8 +9,26 @@ import (
 )
 // NewVerifier Sets up a mock verifier.
-func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
-    return &Verifier{}, nil
+func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
+    batchVKMap := map[string]string{
+        "shanghai": "",
+        "bernoulli": "",
+        "london": "",
+        "istanbul": "",
+        "homestead": "",
+        "eip155": "",
+    }
+    chunkVKMap := map[string]string{
+        "shanghai": "",
+        "bernoulli": "",
+        "london": "",
+        "istanbul": "",
+        "homestead": "",
+        "eip155": "",
+    }
+    batchVKMap[cfg.ForkName] = ""
+    chunkVKMap[cfg.ForkName] = ""
+    return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
 }
 // VerifyChunkProof return a mock verification result for a ChunkProof.
@@ -22,7 +40,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
 }
 // VerifyBatchProof return a mock verification result for a BatchProof.
-func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
+func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
     if string(proof.Proof) == InvalidTestProof {
         return false, nil
     }

View File

@@ -9,7 +9,7 @@ const InvalidTestProof = "this is a invalid proof"
 // Verifier represents a rust ffi to a halo2 verifier.
 type Verifier struct {
     cfg *config.VerifierConfig
-    BatchVK string
-    ChunkVK string
+    ChunkVKMap map[string]string
+    BatchVKMap map[string]string
 }

View File

@@ -11,9 +11,11 @@ package verifier
 import "C" //nolint:typecheck
 import (
+    "embed"
     "encoding/base64"
     "encoding/json"
     "io"
+    "io/fs"
     "os"
     "path"
     "unsafe"
@@ -28,7 +30,26 @@ import (
 // NewVerifier Sets up a rust ffi to call verify.
 func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
     if cfg.MockMode {
-        return &Verifier{cfg: cfg}, nil
+        batchVKMap := map[string]string{
+            "shanghai": "",
+            "bernoulli": "",
+            "london": "",
+            "istanbul": "",
+            "homestead": "",
+            "eip155": "",
+        }
+        chunkVKMap := map[string]string{
+            "shanghai": "",
+            "bernoulli": "",
+            "london": "",
+            "istanbul": "",
+            "homestead": "",
+            "eip155": "",
+        }
+        batchVKMap[cfg.ForkName] = ""
+        chunkVKMap[cfg.ForkName] = ""
+        return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
     }
     paramsPathStr := C.CString(cfg.ParamsPath)
     assetsPathStr := C.CString(cfg.AssetsPath)
@@ -40,25 +61,31 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
     C.init_batch_verifier(paramsPathStr, assetsPathStr)
     C.init_chunk_verifier(paramsPathStr, assetsPathStr)
-    batchVK, err := readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
+    v := &Verifier{
+        cfg: cfg,
+        ChunkVKMap: make(map[string]string),
+        BatchVKMap: make(map[string]string),
+    }
+    batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
     if err != nil {
         return nil, err
     }
-    chunkVK, err := readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
+    chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
     if err != nil {
         return nil, err
     }
+    v.BatchVKMap[cfg.ForkName] = batchVK
+    v.ChunkVKMap[cfg.ForkName] = chunkVK
-    return &Verifier{
-        cfg: cfg,
-        BatchVK: batchVK,
-        ChunkVK: chunkVK,
-    }, nil
+    if err := v.loadEmbedVK(); err != nil {
+        return nil, err
+    }
+    return v, nil
 }
 // VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
-func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
+func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
     if v.cfg.MockMode {
         log.Info("Mock mode, batch verifier disabled")
         if string(proof.Proof) == InvalidTestProof {
@@ -72,13 +99,15 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
         return false, err
     }
+    log.Info("Start to verify batch proof", "forkName", forkName)
     proofStr := C.CString(string(buf))
+    forkNameStr := C.CString(forkName)
     defer func() {
         C.free(unsafe.Pointer(proofStr))
+        C.free(unsafe.Pointer(forkNameStr))
     }()
-    log.Info("Start to verify batch proof ...")
-    verified := C.verify_batch_proof(proofStr)
+    verified := C.verify_batch_proof(proofStr, forkNameStr)
     return verified != 0, nil
 }
@@ -107,7 +136,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
     return verified != 0, nil
 }
-func readVK(filePat string) (string, error) {
+func (v *Verifier) readVK(filePat string) (string, error) {
     f, err := os.Open(filePat)
     if err != nil {
         return "", err
@@ -118,3 +147,26 @@ func readVK(filePat string) (string, error) {
     }
     return base64.StdEncoding.EncodeToString(byt), nil
 }
+//go:embed legacy_vk/*
+var legacyVKFS embed.FS
+func (v *Verifier) loadEmbedVK() error {
+    batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
+    if err != nil {
+        log.Error("load embed batch vk failure", "err", err)
+        return err
+    }
+    chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
+    if err != nil {
+        log.Error("load embed chunk vk failure", "err", err)
+        return err
+    }
+    v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
+    v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
+    v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
+    v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
+    return nil
+}
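loadEmbedVK compiles the pre-fork ("legacy") verifying keys into the binary with go:embed and registers them under both "shanghai" and the empty fork name, so provers that predate the fork-name field still find a matching VK. A sketch of the same embed-and-encode step, assuming a legacy_vk/ directory containing agg_vk.vkey and chunk_vk.vkey sits next to the source file (loadLegacyVKs is an illustrative helper, not part of the repository):

package main

import (
	"embed"
	"encoding/base64"
	"fmt"
	"io/fs"
)

// legacy_vk/ is assumed to contain agg_vk.vkey and chunk_vk.vkey; go:embed
// compiles them into the binary at build time.
//
//go:embed legacy_vk/*
var legacyVKFS embed.FS

// loadLegacyVKs base64-encodes the embedded keys the same way readVK encodes
// keys read from the assets directory, so provers can compare them against the
// VK they send in the get-task request.
func loadLegacyVKs() (batchVK, chunkVK string, err error) {
	batchBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
	if err != nil {
		return "", "", err
	}
	chunkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
	if err != nil {
		return "", "", err
	}
	return base64.StdEncoding.EncodeToString(batchBytes), base64.StdEncoding.EncodeToString(chunkBytes), nil
}

func main() {
	batchVK, chunkVK, err := loadLegacyVKs()
	if err != nil {
		panic(err)
	}
	// Register the legacy keys under both "shanghai" and the empty fork name,
	// matching loadEmbedVK above.
	batchVKMap := map[string]string{"": batchVK, "shanghai": batchVK}
	chunkVKMap := map[string]string{"": chunkVK, "shanghai": chunkVK}
	fmt.Println(len(batchVKMap), len(chunkVKMap))
}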

View File

@@ -14,7 +14,6 @@ import (
     "scroll-tech/common/types/message"
     "scroll-tech/coordinator/internal/config"
-    "scroll-tech/coordinator/internal/logic/verifier"
 )
 var (
@@ -34,7 +33,7 @@ func TestFFI(t *testing.T) {
         AssetsPath: *assetsPath,
     }
-    v, err := verifier.NewVerifier(cfg)
+    v, err := NewVerifier(cfg)
     as.NoError(err)
     chunkProof1 := readChunkProof(*chunkProofPath1, as)
@@ -50,7 +49,7 @@ func TestFFI(t *testing.T) {
     t.Log("Verified chunk proof 2")
     batchProof := readBatchProof(*batchProofPath, as)
-    batchOk, err := v.VerifyBatchProof(batchProof)
+    batchOk, err := v.VerifyBatchProof(batchProof, "bernoulli")
     as.NoError(err)
     as.True(batchOk)
     t.Log("Verified batch proof")

View File

@@ -24,6 +24,7 @@ type Batch struct {
     // batch
     Index uint64 `json:"index" gorm:"column:index"`
     Hash string `json:"hash" gorm:"column:hash"`
+    DataHash string `json:"data_hash" gorm:"column:data_hash"`
     StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
     StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
     EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -54,6 +55,10 @@ type Batch struct {
     OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
     OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
+    // blob
+    BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+    BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
     // metadata
     CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
     UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -73,22 +78,16 @@ func (*Batch) TableName() string {
 // GetUnassignedBatch retrieves unassigned batch based on the specified limit.
 // The returned batch are sorted in ascending order by their index.
 func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
-    db := o.db.WithContext(ctx)
-    db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
-    db = db.Where("total_attempts < ?", maxTotalAttempts)
-    db = db.Where("active_attempts < ?", maxActiveAttempts)
-    db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
-    db = db.Where("start_chunk_index >= ?", startChunkIndex)
-    db = db.Where("end_chunk_index < ?", endChunkIndex)
     var batch Batch
-    err := db.First(&batch).Error
-    if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
-        return nil, nil
-    }
+    db := o.db.WithContext(ctx)
+    sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
+        int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
+    err := db.Raw(sql).Scan(&batch).Error
     if err != nil {
-        return nil, fmt.Errorf("Batch.GetUnassignedBatches error: %w", err)
+        return nil, fmt.Errorf("Batch.GetUnassignedBatch error: %w", err)
+    }
+    if batch.Hash == "" {
+        return nil, nil
     }
     return &batch, nil
 }
@@ -96,22 +95,16 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChun
 // GetAssignedBatch retrieves assigned batch based on the specified limit.
 // The returned batch are sorted in ascending order by their index.
 func (o *Batch) GetAssignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
-    db := o.db.WithContext(ctx)
-    db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
-    db = db.Where("total_attempts < ?", maxTotalAttempts)
-    db = db.Where("active_attempts < ?", maxActiveAttempts)
-    db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
-    db = db.Where("start_chunk_index >= ?", startChunkIndex)
-    db = db.Where("end_chunk_index < ?", endChunkIndex)
     var batch Batch
-    err := db.First(&batch).Error
-    if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
-        return nil, nil
-    }
+    db := o.db.WithContext(ctx)
+    sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
+        int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
+    err := db.Raw(sql).Scan(&batch).Error
     if err != nil {
-        return nil, fmt.Errorf("Batch.GetAssignedBatches error: %w", err)
+        return nil, fmt.Errorf("Batch.GetAssignedBatch error: %w", err)
+    }
+    if batch.Hash == "" {
+        return nil, nil
     }
     return &batch, nil
 }
@@ -260,6 +253,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
     newBatch := Batch{
         Index: batch.Index,
         Hash: daBatch.Hash().Hex(),
+        DataHash: daBatch.DataHash.Hex(),
         StartChunkHash: startDAChunkHash.Hex(),
         StartChunkIndex: startChunkIndex,
         EndChunkHash: endDAChunkHash.Hex(),
@@ -274,6 +268,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
         ActiveAttempts: 0,
         RollupStatus: int16(types.RollupPending),
         OracleStatus: int16(types.GasOraclePending),
+        BlobDataProof: nil, // using mock value because this piece of codes is only used in unit tests
+        BlobSize: 0, // using mock value because this piece of codes is only used in unit tests
     }
     db := o.db
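GetUnassignedBatch and GetAssignedBatch now build a single raw SQL statement instead of a chained gorm query; because Raw(...).Scan leaves the destination untouched when no row matches, the "not found" case is detected by an empty Hash rather than by gorm.ErrRecordNotFound. Every interpolated value is numeric, so fmt.Sprintf cannot inject, though gorm's Raw would also accept ? placeholders. A trimmed sketch of the pattern (the status constants stand in for the types enums and are illustrative only):

package orm

import (
	"context"
	"fmt"

	"gorm.io/gorm"
)

// provingTaskUnassigned and chunkProofsStatusReady stand in for the
// types.ProvingTaskUnassigned / types.ChunkProofsStatusReady enums used by the
// real query; the concrete values are illustrative only.
const (
	provingTaskUnassigned  = 1
	chunkProofsStatusReady = 1
)

// Batch is trimmed to the columns the sketch needs.
type Batch struct {
	Index uint64 `gorm:"column:index"`
	Hash  string `gorm:"column:hash"`
}

// getUnassignedBatch shows the Raw(...).Scan pattern: no gorm.ErrRecordNotFound
// is produced, so an untouched (empty) Hash signals "nothing to assign".
func getUnassignedBatch(ctx context.Context, db *gorm.DB, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
	var batch Batch
	sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
		provingTaskUnassigned, maxTotalAttempts, maxActiveAttempts, chunkProofsStatusReady, startChunkIndex, endChunkIndex)
	if err := db.WithContext(ctx).Raw(sql).Scan(&batch).Error; err != nil {
		return nil, fmt.Errorf("getUnassignedBatch error: %w", err)
	}
	if batch.Hash == "" {
		return nil, nil // zero-value struct: no row matched
	}
	return &batch, nil
}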

View File

@@ -48,6 +48,10 @@ type Chunk struct {
     // batch
     BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
+    // blob
+    CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
+    BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
     // metadata
     TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
     TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -71,22 +75,16 @@ func (*Chunk) TableName() string {
 // GetUnassignedChunk retrieves unassigned chunk based on the specified limit.
 // The returned chunks are sorted in ascending order by their index.
 func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
-    db := o.db.WithContext(ctx)
-    db = db.Model(&Chunk{})
-    db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
-    db = db.Where("total_attempts < ?", maxTotalAttempts)
-    db = db.Where("active_attempts < ?", maxActiveAttempts)
-    db = db.Where("start_block_number >= ?", fromBlockNum)
-    db = db.Where("end_block_number < ?", toBlockNum)
     var chunk Chunk
-    err := db.First(&chunk).Error
-    if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
-        return nil, nil
-    }
+    db := o.db.WithContext(ctx)
+    sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
+        int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
+    err := db.Raw(sql).Scan(&chunk).Error
     if err != nil {
-        return nil, fmt.Errorf("Chunk.GetUnassignedChunks error: %w", err)
+        return nil, fmt.Errorf("Chunk.GetUnassignedChunk error: %w", err)
+    }
+    if chunk.Hash == "" {
+        return nil, nil
     }
     return &chunk, nil
 }
@@ -94,22 +92,16 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum
 // GetAssignedChunk retrieves assigned chunk based on the specified limit.
 // The returned chunks are sorted in ascending order by their index.
 func (o *Chunk) GetAssignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
-    db := o.db.WithContext(ctx)
-    db = db.Model(&Chunk{})
-    db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
-    db = db.Where("total_attempts < ?", maxTotalAttempts)
-    db = db.Where("active_attempts < ?", maxActiveAttempts)
-    db = db.Where("start_block_number >= ?", fromBlockNum)
-    db = db.Where("end_block_number < ?", toBlockNum)
     var chunk Chunk
-    err := db.First(&chunk).Error
-    if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
-        return nil, nil
-    }
+    db := o.db.WithContext(ctx)
+    sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
+        int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
+    err := db.Raw(sql).Scan(&chunk).Error
     if err != nil {
-        return nil, fmt.Errorf("Chunk.GetAssignedChunks error: %w", err)
+        return nil, fmt.Errorf("Chunk.GetAssignedChunk error: %w", err)
+    }
+    if chunk.Hash == "" {
+        return nil, nil
     }
     return &chunk, nil
 }
@@ -312,6 +304,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
         ProvingStatus: int16(types.ProvingTaskUnassigned),
         TotalAttempts: 0,
         ActiveAttempts: 0,
+        CrcMax: 0, // using mock value because this piece of codes is only used in unit tests
+        BlobSize: 0, // using mock value because this piece of codes is only used in unit tests
     }
     db := o.db

View File

@@ -9,41 +9,36 @@ import (
     "github.com/stretchr/testify/assert"
     "gorm.io/gorm"
-    "scroll-tech/common/database"
-    "scroll-tech/common/docker"
+    "scroll-tech/common/testcontainers"
     "scroll-tech/common/types"
     "scroll-tech/common/types/message"
     "scroll-tech/common/utils"
     "scroll-tech/database/migrate"
 )
 var (
-    base *docker.App
+    testApps *testcontainers.TestcontainerApps
     db *gorm.DB
     proverTaskOrm *ProverTask
 )
 func TestMain(m *testing.M) {
     t := &testing.T{}
-    setupEnv(t)
-    defer tearDownEnv(t)
+    defer func() {
+        if testApps != nil {
+            testApps.Free()
+        }
+        tearDownEnv(t)
+    }()
     m.Run()
 }
 func setupEnv(t *testing.T) {
-    base = docker.NewDockerApp()
-    base.RunDBImage(t)
+    testApps = testcontainers.NewTestcontainerApps()
+    assert.NoError(t, testApps.StartPostgresContainer())
     var err error
-    db, err = database.InitDB(
-        &database.Config{
-            DSN: base.DBConfig.DSN,
-            DriverName: base.DBConfig.DriverName,
-            MaxOpenNum: base.DBConfig.MaxOpenNum,
-            MaxIdleNum: base.DBConfig.MaxIdleNum,
-        },
-    )
+    db, err = testApps.GetGormDBClient()
     assert.NoError(t, err)
     sqlDB, err := db.DB()
     assert.NoError(t, err)
@@ -56,10 +51,11 @@ func tearDownEnv(t *testing.T) {
     sqlDB, err := db.DB()
     assert.NoError(t, err)
     sqlDB.Close()
-    base.Free()
 }
 func TestProverTaskOrm(t *testing.T) {
+    setupEnv(t)
     sqlDB, err := db.DB()
     assert.NoError(t, err)
     assert.NoError(t, migrate.ResetDB(sqlDB))
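The test setup above swaps the docker-based helpers for the shared testcontainers wrapper: one Postgres container per test package, a gorm client handed out by GetGormDBClient, and Free deferred from TestMain. A condensed sketch of that wiring (error handling trimmed; package path follows the diff):

package orm

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"gorm.io/gorm"

	"scroll-tech/common/testcontainers"
)

var (
	testApps *testcontainers.TestcontainerApps
	db       *gorm.DB
)

// setupEnv starts a throwaway Postgres container and hands back a gorm client,
// replacing the old docker.App/database.InitDB pair.
func setupEnv(t *testing.T) {
	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())

	var err error
	db, err = testApps.GetGormDBClient()
	assert.NoError(t, err)
}

// TestMain frees the container after the package's tests finish.
func TestMain(m *testing.M) {
	defer func() {
		if testApps != nil {
			testApps.Free()
		}
	}()
	m.Run()
}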

View File

@@ -9,6 +9,8 @@ const (
     ProverName = "prover_name"
     // ProverVersion the prover version for context
     ProverVersion = "prover_version"
+    // HardForkName the fork name for context
+    HardForkName = "hard_fork_name"
 )
 // Message the login message struct
@@ -16,6 +18,7 @@ type Message struct {
     Challenge string `form:"challenge" json:"challenge" binding:"required"`
     ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
     ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
+    HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
 }
 // LoginParameter for /login api

View File

@@ -2,7 +2,6 @@ package types
 // GetTaskParameter for ProverTasks request parameter
 type GetTaskParameter struct {
-    HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
     ProverHeight uint64 `form:"prover_height" json:"prover_height"`
     TaskType int `form:"task_type" json:"task_type"`
     VK string `form:"vk" json:"vk"`

View File

@@ -0,0 +1,10 @@
+package types
+var (
+    // LabelProverName label name for prover name; common label name using in prometheus metrics, same rule applies to below.
+    LabelProverName = "prover_name"
+    // LabelProverPublicKey label name for prover public key
+    LabelProverPublicKey = "prover_pubkey"
+    // LabelProverVersion label name for prover version
+    LabelProverVersion = "prover_version"
+)

View File

@@ -21,8 +21,7 @@ import (
     "scroll-tech/database/migrate"
-    "scroll-tech/common/database"
-    "scroll-tech/common/docker"
+    "scroll-tech/common/testcontainers"
     "scroll-tech/common/types"
     "scroll-tech/common/types/encoding"
     "scroll-tech/common/types/message"
@@ -43,10 +42,9 @@ const (
 )
 var (
-    dbCfg *database.Config
-    conf *config.Config
-    base *docker.App
+    conf *config.Config
+    testApps *testcontainers.TestcontainerApps
     db *gorm.DB
     l2BlockOrm *orm.L2Block
@@ -70,13 +68,12 @@ var (
 )
 func TestMain(m *testing.M) {
-    glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
-    glogger.Verbosity(log.LvlInfo)
-    log.Root().SetHandler(glogger)
-    base = docker.NewDockerApp()
+    defer func() {
+        if testApps != nil {
+            testApps.Free()
+        }
+    }()
     m.Run()
-    base.Free()
 }
 func randomURL() string {
@@ -86,7 +83,8 @@ func randomURL() string {
 func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, nameForkMap map[string]int64) (*cron.Collector, *http.Server) {
     var err error
-    db, err = database.InitDB(dbCfg)
+    db, err = testApps.GetGormDBClient()
     assert.NoError(t, err)
     sqlDB, err := db.DB()
     assert.NoError(t, err)
@@ -98,8 +96,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
         ChainID: 111,
     },
     ProverManager: &config.ProverManager{
         ProversPerSession: proversPerSession,
-        Verifier: &config.VerifierConfig{MockMode: true},
+        Verifier: &config.VerifierConfig{
+            MockMode: true,
+        },
         BatchCollectionTimeSec: 10,
         ChunkCollectionTimeSec: 10,
         MaxVerifierWorkers: 10,
@@ -115,6 +115,8 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
     var chainConf params.ChainConfig
     for forkName, forkNumber := range nameForkMap {
         switch forkName {
+        case "shanghai":
+            chainConf.ShanghaiBlock = big.NewInt(forkNumber)
         case "bernoulli":
             chainConf.BernoulliBlock = big.NewInt(forkNumber)
         case "london":
@@ -149,20 +151,18 @@
 }
 func setEnv(t *testing.T) {
+    var err error
     version.Version = "v4.1.98"
-    base = docker.NewDockerApp()
-    base.RunDBImage(t)
-    dbCfg = &database.Config{
-        DSN: base.DBConfig.DSN,
-        DriverName: base.DBConfig.DriverName,
-        MaxOpenNum: base.DBConfig.MaxOpenNum,
-        MaxIdleNum: base.DBConfig.MaxIdleNum,
-    }
-    var err error
-    db, err = database.InitDB(dbCfg)
+    glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
+    glogger.Verbosity(log.LvlInfo)
+    log.Root().SetHandler(glogger)
+    testApps = testcontainers.NewTestcontainerApps()
+    assert.NoError(t, testApps.StartPostgresContainer())
+    db, err = testApps.GetGormDBClient()
     assert.NoError(t, err)
     sqlDB, err := db.DB()
     assert.NoError(t, err)
@@ -199,7 +199,6 @@ func setEnv(t *testing.T) {
 func TestApis(t *testing.T) {
     // Set up the test environment.
-    base = docker.NewDockerApp()
     setEnv(t)
     t.Run("TestHandshake", testHandshake)
@@ -211,11 +210,6 @@ func TestApis(t *testing.T) {
     t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
     t.Run("TestTimeoutProof", testTimeoutProof)
    t.Run("TestHardFork", testHardForkAssignTask)
-    // Teardown
-    t.Cleanup(func() {
-        base.Free()
-    })
 }
 func testHandshake(t *testing.T) {
@@ -268,12 +262,12 @@ func testGetTaskBlocked(t *testing.T) {
     assert.NoError(t, err)
     expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
-    code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
+    code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
     assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
     assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
     expectedErr = fmt.Errorf("get empty prover task")
-    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
+    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
     assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
     assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
@@ -284,12 +278,12 @@ func testGetTaskBlocked(t *testing.T) {
     assert.NoError(t, err)
     expectedErr = fmt.Errorf("get empty prover task")
-    code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
+    code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
     assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
     assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
     expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
-    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
+    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
     assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
     assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
 }
@@ -309,12 +303,12 @@ func testOutdatedProverVersion(t *testing.T) {
     assert.True(t, chunkProver.healthCheckSuccess(t))
     expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion)
-    code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
+    code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
     assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
     assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
     expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion)
-    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
+    code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
     assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
     assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
 }
@@ -368,7 +362,7 @@ func testHardForkAssignTask(t *testing.T) {
     {
         name: "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0",
         proofType: message.ProofTypeBatch,
-        forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
+        forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
         exceptTaskNumber: 0,
         proverForkNames: []string{"", ""},
         exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
@@ -458,7 +452,7 @@ func testHardForkAssignTask(t *testing.T) {
     { // hard fork 3, prover1:2 prover2:3 block [2-3]
         name: "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0",
         proofType: message.ProofTypeChunk,
-        forkNumbers: map[string]int64{"london": forkNumberThree},
+        forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
         exceptTaskNumber: 2,
         proverForkNames: []string{"", "london"},
         exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -467,7 +461,7 @@ func testHardForkAssignTask(t *testing.T) {
     {
         name: "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0",
         proofType: message.ProofTypeBatch,
-        forkNumbers: map[string]int64{"london": forkNumberThree},
+        forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
         exceptTaskNumber: 2,
         proverForkNames: []string{"", "london"},
         exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -476,7 +470,7 @@ func testHardForkAssignTask(t *testing.T) {
     { // hard fork 2, prover 2 block [2-3]
         name: "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk, proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree}, forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
exceptTaskNumber: 1, exceptTaskNumber: 1,
proverForkNames: []string{"", ""}, proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData}, exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
@@ -544,7 +538,7 @@ func testHardForkAssignTask(t *testing.T) {
continue continue
} }
getTaskNumber++ getTaskNumber++
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success) mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i])
} }
assert.Equal(t, getTaskNumber, tt.exceptTaskNumber) assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
}) })
@@ -587,7 +581,7 @@ func testValidProof(t *testing.T) {
assert.Equal(t, errCode, types.Success) assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "") assert.Equal(t, errMsg, "")
assert.NotNil(t, proverTask) assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, proofStatus, types.Success) provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul")
} }
// verify proof status // verify proof status
@@ -653,34 +647,21 @@ func testInvalidProof(t *testing.T) {
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady) err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err) assert.NoError(t, err)
// create mock provers. proofType := message.ProofTypeBatch
provers := make([]*mockProver, 2) provingStatus := verifiedFailed
for i := 0; i < len(provers); i++ { expectErrCode := types.ErrCoordinatorHandleZkProofFailure
var proofType message.ProofType prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version)
if i%2 == 0 { proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul")
proofType = message.ProofTypeChunk assert.NotNil(t, proverTask)
} else { assert.Equal(t, errCode, types.Success)
proofType = message.ProofTypeBatch assert.Equal(t, errMsg, "")
} prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul")
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
}
// verify proof status // verify proof status
var ( var (
tick = time.Tick(1500 * time.Millisecond) tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute) tickStop = time.Tick(time.Minute)
)
var (
chunkProofStatus types.ProvingStatus
batchProofStatus types.ProvingStatus batchProofStatus types.ProvingStatus
chunkActiveAttempts int16
chunkMaxAttempts int16
batchActiveAttempts int16 batchActiveAttempts int16
batchMaxAttempts int16 batchMaxAttempts int16
) )
@@ -688,24 +669,17 @@ func testInvalidProof(t *testing.T) {
for { for {
select { select {
case <-tick: case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash) batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err) assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned { if batchProofStatus == types.ProvingTaskAssigned {
return return
} }
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash) batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 1, int(batchMaxAttempts)) assert.Equal(t, 1, int(batchMaxAttempts))
assert.Equal(t, 0, int(batchActiveAttempts)) assert.Equal(t, 0, int(batchActiveAttempts))
case <-tickStop: case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String()) t.Error("failed to check proof status", "batchProofStatus", batchProofStatus.String())
return return
} }
} }
@@ -745,7 +719,7 @@ func testProofGeneratedFailed(t *testing.T) {
assert.NotNil(t, proverTask) assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success) assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "") assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure) provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul")
} }
// verify proof status // verify proof status
@@ -868,14 +842,14 @@ func testTimeoutProof(t *testing.T) {
assert.NotNil(t, proverChunkTask2) assert.NotNil(t, proverChunkTask2)
assert.Equal(t, chunkTask2ErrCode, types.Success) assert.Equal(t, chunkTask2ErrCode, types.Success)
assert.Equal(t, chunkTask2ErrMsg, "") assert.Equal(t, chunkTask2ErrMsg, "")
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success) chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul")
batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version) batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version)
proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul") proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul")
assert.NotNil(t, proverBatchTask2) assert.NotNil(t, proverBatchTask2)
assert.Equal(t, batchTask2ErrCode, types.Success) assert.Equal(t, batchTask2ErrCode, types.Success)
assert.Equal(t, batchTask2ErrMsg, "") assert.Equal(t, batchTask2ErrMsg, "")
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success) batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul")
// verify proof status, it should be verified now, because second prover sent valid proof // verify proof status, it should be verified now, because second prover sent valid proof
chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash) chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)


@@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
} }
// connectToCoordinator sets up a websocket client to connect to the prover manager. // connectToCoordinator sets up a websocket client to connect to the prover manager.
func (r *mockProver) connectToCoordinator(t *testing.T) string { func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string {
challengeString := r.challenge(t) challengeString := r.challenge(t)
return r.login(t, challengeString) return r.login(t, challengeString, forkName)
} }
func (r *mockProver) challenge(t *testing.T) string { func (r *mockProver) challenge(t *testing.T) string {
@@ -76,18 +76,32 @@ func (r *mockProver) challenge(t *testing.T) string {
return loginData.Token return loginData.Token
} }
func (r *mockProver) login(t *testing.T, challengeString string) string { func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string {
authMsg := message.AuthMsg{ var body string
Identity: &message.Identity{ if forkName != "" {
Challenge: challengeString, authMsg := message.AuthMsg{
ProverName: r.proverName, Identity: &message.Identity{
ProverVersion: r.proverVersion, Challenge: challengeString,
}, ProverName: r.proverName,
ProverVersion: r.proverVersion,
HardForkName: forkName,
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
} else {
authMsg := message.LegacyAuthMsg{
Identity: &message.LegacyIdentity{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
} }
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
var result ctypes.Response var result ctypes.Response
client := resty.New() client := resty.New()
@@ -137,7 +151,7 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) { func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) {
// get task from coordinator // get task from coordinator
token := r.connectToCoordinator(t) token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token) assert.NotEmpty(t, token)
type response struct { type response struct {
@@ -151,7 +165,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
resp, err := client.R(). resp, err := client.R().
SetHeader("Content-Type", "application/json"). SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)). SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "hard_fork_name": forkName}). SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetResult(&result). SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task") Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err) assert.NoError(t, err)
@@ -160,9 +174,11 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
} }
// Testing expected errors returned by coordinator. // Testing expected errors returned by coordinator.
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) { //
//nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) {
// get task from coordinator // get task from coordinator
token := r.connectToCoordinator(t) token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token) assert.NotEmpty(t, token)
type response struct { type response struct {
@@ -185,7 +201,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
return result.ErrCode, result.ErrMsg return result.ErrCode, result.ErrMsg
} }
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) { func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) {
proofMsgStatus := message.StatusOk proofMsgStatus := message.StatusOk
if proofStatus == generatedFailed { if proofStatus == generatedFailed {
proofMsgStatus = message.StatusProofError proofMsgStatus = message.StatusProofError
@@ -228,7 +244,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
submitProof.Proof = string(encodeData) submitProof.Proof = string(encodeData)
} }
token := r.connectToCoordinator(t) token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token) assert.NotEmpty(t, token)
submitProofData, err := json.Marshal(submitProof) submitProofData, err := json.Marshal(submitProof)
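Taken together, the helper changes above thread a hard-fork name through login, task fetching, and proof submission. A minimal sketch of how the updated signatures fit together, assuming the same test package; the prover name "prover_example" and fork name "london" are placeholders, and this is not a test taken from this diff:

// Sketch: drive the fork-aware mockProver helpers end to end.
func exerciseForkAwareProver(t *testing.T, coordinatorURL string) {
	prover := newMockProver(t, "prover_example", coordinatorURL, message.ProofTypeChunk, version.Version)

	// getProverTask logs in via connectToCoordinator(t, forkName); an empty fork name
	// falls back to the legacy auth message without hard_fork_name.
	task, errCode, errMsg := prover.getProverTask(t, message.ProofTypeChunk, "london")
	assert.Equal(t, types.Success, errCode)
	assert.Empty(t, errMsg)

	// submitProof logs in again, so the fork name is passed once more.
	prover.submitProof(t, task, verifiedSuccess, types.Success, "london")
}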


@@ -1,93 +1,89 @@
package migrate package migrate
import ( import (
"database/sql"
"testing" "testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" _ "github.com/lib/pq"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/docker" "scroll-tech/common/testcontainers"
"scroll-tech/database"
) )
var ( var (
base *docker.App testApps *testcontainers.TestcontainerApps
pgDB *sqlx.DB pgDB *sql.DB
) )
func initEnv(t *testing.T) error { func setupEnv(t *testing.T) {
// Start db container. // Start db container.
base.RunDBImage(t) testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
gormClient, err := testApps.GetGormDBClient()
assert.NoError(t, err)
pgDB, err = gormClient.DB()
assert.NoError(t, err)
}
// Create db orm handler. func TestMain(m *testing.M) {
factory, err := database.NewOrmFactory(base.DBConfig) defer func() {
if err != nil { if testApps != nil {
return err testApps.Free()
} }
pgDB = factory.GetDB() }()
return nil m.Run()
} }
func TestMigrate(t *testing.T) { func TestMigrate(t *testing.T) {
base = docker.NewDockerApp() setupEnv(t)
if err := initEnv(t); err != nil {
t.Fatal(err)
}
t.Run("testCurrent", testCurrent) t.Run("testCurrent", testCurrent)
t.Run("testStatus", testStatus) t.Run("testStatus", testStatus)
t.Run("testResetDB", testResetDB) t.Run("testResetDB", testResetDB)
t.Run("testMigrate", testMigrate) t.Run("testMigrate", testMigrate)
t.Run("testRollback", testRollback) t.Run("testRollback", testRollback)
t.Cleanup(func() {
base.Free()
})
} }
func testCurrent(t *testing.T) { func testCurrent(t *testing.T) {
cur, err := Current(pgDB.DB) cur, err := Current(pgDB)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, int64(0), cur) assert.Equal(t, int64(0), cur)
} }
func testStatus(t *testing.T) { func testStatus(t *testing.T) {
status := Status(pgDB.DB) status := Status(pgDB)
assert.NoError(t, status) assert.NoError(t, status)
} }
func testResetDB(t *testing.T) { func testResetDB(t *testing.T) {
assert.NoError(t, ResetDB(pgDB.DB)) assert.NoError(t, ResetDB(pgDB))
cur, err := Current(pgDB.DB) cur, err := Current(pgDB)
assert.NoError(t, err) assert.NoError(t, err)
// total number of tables. // total number of tables.
assert.Equal(t, int64(16), cur) assert.Equal(t, int64(17), cur)
} }
func testMigrate(t *testing.T) { func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB.DB)) assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB.DB) cur, err := Current(pgDB)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, int64(16), cur) assert.Equal(t, int64(17), cur)
} }
func testRollback(t *testing.T) { func testRollback(t *testing.T) {
version, err := Current(pgDB.DB) version, err := Current(pgDB)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, int64(16), version) assert.Equal(t, int64(17), version)
assert.NoError(t, Rollback(pgDB.DB, nil)) assert.NoError(t, Rollback(pgDB, nil))
cur, err := Current(pgDB.DB) cur, err := Current(pgDB)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, version, cur+1) assert.Equal(t, version, cur+1)
targetVersion := int64(0) targetVersion := int64(0)
assert.NoError(t, Rollback(pgDB.DB, &targetVersion)) assert.NoError(t, Rollback(pgDB, &targetVersion))
cur, err = Current(pgDB.DB) cur, err = Current(pgDB)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, int64(0), cur) assert.Equal(t, int64(0), cur)
} }


@@ -0,0 +1,27 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE chunk
ADD COLUMN crc_max INTEGER DEFAULT 0,
ADD COLUMN blob_size INTEGER DEFAULT 0;
ALTER TABLE batch
ADD COLUMN data_hash VARCHAR DEFAULT '',
ADD COLUMN blob_data_proof BYTEA DEFAULT NULL,
ADD COLUMN blob_size INTEGER DEFAULT 0;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS batch
DROP COLUMN data_hash,
DROP COLUMN blob_data_proof,
DROP COLUMN blob_size;
ALTER TABLE IF EXISTS chunk
DROP COLUMN crc_max,
DROP COLUMN blob_size;
-- +goose StatementEnd
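After this migration runs, the blob metadata sits alongside each batch and chunk row. A hypothetical spot check with database/sql; the table and column names come from the migration above, while the lookup by hash, the function name, and the imports ("database/sql" plus the go-ethereum log package) are assumptions of this sketch:

// Sketch: read back the blob-related columns added by this migration.
func readBatchBlobColumns(db *sql.DB, batchHash string) error {
	var dataHash string
	var blobSize int64
	row := db.QueryRow(`SELECT data_hash, blob_size FROM batch WHERE hash = $1`, batchHash)
	if err := row.Scan(&dataHash, &blobSize); err != nil {
		return err
	}
	log.Info("batch blob metadata", "dataHash", dataHash, "blobSize", blobSize)
	return nil
}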


@@ -1559,6 +1559,9 @@ github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZy
github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs= github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM= github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM=
github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE= github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8= github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=


@@ -21,14 +21,15 @@ import (
type CoordinatorClient struct { type CoordinatorClient struct {
client *resty.Client client *resty.Client
proverName string proverName string
priv *ecdsa.PrivateKey hardForkName string
priv *ecdsa.PrivateKey
mu sync.Mutex mu sync.Mutex
} }
// NewCoordinatorClient constructs a new CoordinatorClient. // NewCoordinatorClient constructs a new CoordinatorClient.
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) { func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, hardForkName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
client := resty.New(). client := resty.New().
SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second). SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second).
SetRetryCount(cfg.RetryCount). SetRetryCount(cfg.RetryCount).
@@ -50,9 +51,10 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
"retry wait time (second)", cfg.RetryWaitTimeSec) "retry wait time (second)", cfg.RetryWaitTimeSec)
return &CoordinatorClient{ return &CoordinatorClient{
client: client, client: client,
proverName: proverName, proverName: proverName,
priv: priv, hardForkName: hardForkName,
priv: priv,
}, nil }, nil
} }
@@ -83,6 +85,7 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
ProverVersion: version.Version, ProverVersion: version.Version,
ProverName: c.proverName, ProverName: c.proverName,
Challenge: challengeResult.Data.Token, Challenge: challengeResult.Data.Token,
HardForkName: c.hardForkName,
}, },
} }
@@ -97,10 +100,12 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
Challenge string `json:"challenge"` Challenge string `json:"challenge"`
ProverName string `json:"prover_name"` ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"` ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
}{ }{
Challenge: authMsg.Identity.Challenge, Challenge: authMsg.Identity.Challenge,
ProverName: authMsg.Identity.ProverName, ProverName: authMsg.Identity.ProverName,
ProverVersion: authMsg.Identity.ProverVersion, ProverVersion: authMsg.Identity.ProverVersion,
HardForkName: authMsg.Identity.HardForkName,
}, },
Signature: authMsg.Signature, Signature: authMsg.Signature,
} }
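With the extra constructor argument, a prover binds its hard-fork name once when the client is built and sends it on every login. A rough usage sketch; the prover name, the fork name "london", and the surrounding error handling are placeholders, not values mandated by this diff:

// Sketch: construct a fork-aware coordinator client and log in.
coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, "prover-example", "london", priv)
if err != nil {
	return nil, err
}
// The login payload now carries hard_fork_name next to prover_name and prover_version.
if err := coordinatorClient.Login(ctx); err != nil {
	return nil, err
}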


@@ -25,6 +25,7 @@ type LoginRequest struct {
Challenge string `json:"challenge"` Challenge string `json:"challenge"`
ProverName string `json:"prover_name"` ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"` ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
} `json:"message"` } `json:"message"`
Signature string `json:"signature"` Signature string `json:"signature"`
} }
@@ -41,7 +42,6 @@ type LoginResponse struct {
// GetTaskRequest defines the request structure for GetTask API // GetTaskRequest defines the request structure for GetTask API
type GetTaskRequest struct { type GetTaskRequest struct {
HardForkName string `json:"hard_fork_name"`
TaskType message.ProofType `json:"task_type"` TaskType message.ProofType `json:"task_type"`
ProverHeight uint64 `json:"prover_height,omitempty"` ProverHeight uint64 `json:"prover_height,omitempty"`
VK string `json:"vk"` VK string `json:"vk"`
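The net effect of these two type changes is that the fork name travels in the login message rather than in every get_task call. A hedged illustration of the resulting payloads; client.GetTaskRequest is referenced elsewhere in this diff, while placing LoginRequest in the same client package, the challenge token, and the verifying key string are assumptions of this sketch:

// Login message: the fork name is declared once, at authentication time.
var login client.LoginRequest
login.Message.Challenge = challengeToken // placeholder challenge token
login.Message.ProverName = "prover-example"
login.Message.ProverVersion = version.Version
login.Message.HardForkName = "london"

// get_task request: no hard_fork_name field any more.
taskReq := client.GetTaskRequest{
	TaskType:     message.ProofTypeChunk,
	ProverHeight: 100,
	VK:           vk, // placeholder verifying key
}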


@@ -12,7 +12,7 @@ import (
"scroll-tech/prover/config" "scroll-tech/prover/config"
"scroll-tech/common/cmd" "scroll-tech/common/cmd"
"scroll-tech/common/docker" "scroll-tech/common/testcontainers"
"scroll-tech/common/types/message" "scroll-tech/common/types/message"
"scroll-tech/common/utils" "scroll-tech/common/utils"
) )
@@ -30,7 +30,7 @@ func getIndex() int {
type ProverApp struct { type ProverApp struct {
Config *config.Config Config *config.Config
base *docker.App testApps *testcontainers.TestcontainerApps
originFile string originFile string
proverFile string proverFile string
@@ -39,11 +39,11 @@ type ProverApp struct {
index int index int
name string name string
args []string args []string
docker.AppAPI *cmd.Cmd
} }
// NewProverApp return a new proverApp manager. // NewProverApp return a new proverApp manager.
func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, httpURL string) *ProverApp { func NewProverApp(testApps *testcontainers.TestcontainerApps, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
var proofType message.ProofType var proofType message.ProofType
switch mockName { switch mockName {
case utils.ChunkProverApp: case utils.ChunkProverApp:
@@ -54,17 +54,17 @@ func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, htt
return nil return nil
} }
name := string(mockName) name := string(mockName)
proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", base.Timestamp, name) proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", testApps.Timestamp, name)
proverApp := &ProverApp{ proverApp := &ProverApp{
base: base, testApps: testApps,
originFile: file, originFile: file,
proverFile: proverFile, proverFile: proverFile,
bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", base.Timestamp, name), bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", testApps.Timestamp, name),
index: getIndex(), index: getIndex(),
name: name, name: name,
args: []string{"--log.debug", "--config", proverFile}, args: []string{"--log.debug", "--config", proverFile},
} }
proverApp.AppAPI = cmd.NewCmd(proverApp.name, proverApp.args...) proverApp.Cmd = cmd.NewCmd(proverApp.name, proverApp.args...)
if err := proverApp.MockConfig(true, httpURL, proofType); err != nil { if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
panic(err) panic(err)
} }
@@ -73,13 +73,13 @@ func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, htt
// RunApp run prover-test child process by multi parameters. // RunApp run prover-test child process by multi parameters.
func (r *ProverApp) RunApp(t *testing.T) { func (r *ProverApp) RunApp(t *testing.T) {
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, "prover start successfully") }) r.Cmd.RunApp(func() bool { return r.Cmd.WaitResult(t, time.Second*40, "prover start successfully") })
} }
// Free stop and release prover-test. // Free stop and release prover-test.
func (r *ProverApp) Free() { func (r *ProverApp) Free() {
if !utils.IsNil(r.AppAPI) { if !utils.IsNil(r.Cmd) {
r.AppAPI.WaitExit() r.Cmd.WaitExit()
} }
_ = os.Remove(r.proverFile) _ = os.Remove(r.proverFile)
_ = os.Remove(r.Config.KeystorePath) _ = os.Remove(r.Config.KeystorePath)
@@ -93,8 +93,13 @@ func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.Pro
return err return err
} }
cfg.ProverName = fmt.Sprintf("%s_%d", r.name, r.index) cfg.ProverName = fmt.Sprintf("%s_%d", r.name, r.index)
cfg.KeystorePath = fmt.Sprintf("/tmp/%d_%s.json", r.base.Timestamp, cfg.ProverName) cfg.KeystorePath = fmt.Sprintf("/tmp/%d_%s.json", r.testApps.Timestamp, cfg.ProverName)
cfg.L2Geth.Endpoint = r.base.L2gethImg.Endpoint()
endpoint, err := r.testApps.GetL2GethEndPoint()
if err != nil {
return err
}
cfg.L2Geth.Endpoint = endpoint
cfg.L2Geth.Confirmations = rpc.LatestBlockNumber cfg.L2Geth.Confirmations = rpc.LatestBlockNumber
// Reuse l1geth's keystore file // Reuse l1geth's keystore file
cfg.KeystorePassword = "scrolltest" cfg.KeystorePassword = "scrolltest"


@@ -82,7 +82,7 @@ func NewProver(ctx context.Context, cfg *config.Config) (*Prover, error) {
} }
log.Info("init prover_core successfully!") log.Info("init prover_core successfully!")
coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, priv) coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, cfg.HardForkName, priv)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -178,8 +178,7 @@ func (r *Prover) proveAndSubmit() error {
func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) { func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
// prepare the request // prepare the request
req := &client.GetTaskRequest{ req := &client.GetTaskRequest{
HardForkName: r.cfg.HardForkName, TaskType: r.Type(),
TaskType: r.Type(),
// we may not be able to get the vk at the first time, so we should pass vk to the coordinator every time we getTask // we may not be able to get the vk at the first time, so we should pass vk to the coordinator every time we getTask
// instead of passing vk when we login // instead of passing vk when we login
VK: r.proverCore.VK, VK: r.proverCore.VK,


@@ -76,7 +76,7 @@ func action(ctx *cli.Context) error {
} }
}) })
log.Info("Start event-watcher successfully") log.Info("Start event-watcher successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown. // Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1) interrupt := make(chan os.Signal, 1)


@@ -109,7 +109,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle) go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions // Finish start all message relayer functions
log.Info("Start gas-oracle successfully") log.Info("Start gas-oracle successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown. // Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1) interrupt := make(chan os.Signal, 1)


@@ -7,22 +7,19 @@ import (
"testing" "testing"
"time" "time"
"scroll-tech/rollup/internal/config"
"scroll-tech/common/cmd" "scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers" "scroll-tech/common/testcontainers"
"scroll-tech/common/utils" "scroll-tech/common/utils"
"scroll-tech/rollup/internal/config"
) )
// MockApp mockApp-test client manager. // MockApp mockApp-test client manager.
type MockApp struct { type MockApp struct {
Config *config.Config Config *config.Config
// TODO field willl be replaced by testApps
base *docker.App
testApps *testcontainers.TestcontainerApps testApps *testcontainers.TestcontainerApps
mockApps map[utils.MockAppName]docker.AppAPI mockApps map[utils.MockAppName]*cmd.Cmd
originFile string originFile string
rollupFile string rollupFile string
@@ -30,12 +27,12 @@ type MockApp struct {
args []string args []string
} }
// NewRollupApp TODO function will be replaced by NewRollupApp2 // NewRollupApp returns a new rollupApp manager.
func NewRollupApp(base *docker.App, file string) *MockApp { func NewRollupApp(testApps *testcontainers.TestcontainerApps, file string) *MockApp {
rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", base.Timestamp) rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", testApps.Timestamp)
rollupApp := &MockApp{ rollupApp := &MockApp{
base: base, testApps: testApps,
mockApps: make(map[utils.MockAppName]docker.AppAPI), mockApps: make(map[utils.MockAppName]*cmd.Cmd),
originFile: file, originFile: file,
rollupFile: rollupFile, rollupFile: rollupFile,
args: []string{"--log.debug", "--config", rollupFile}, args: []string{"--log.debug", "--config", rollupFile},
@@ -46,22 +43,6 @@ func NewRollupApp(base *docker.App, file string) *MockApp {
return rollupApp return rollupApp
} }
// NewRollupApp2 return a new rollupApp manager, name mush be one them.
func NewRollupApp2(testApps *testcontainers.TestcontainerApps, file string) *MockApp {
rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", testApps.Timestamp)
rollupApp := &MockApp{
testApps: testApps,
mockApps: make(map[utils.MockAppName]docker.AppAPI),
originFile: file,
rollupFile: rollupFile,
args: []string{"--log.debug", "--config", rollupFile},
}
if err := rollupApp.MockConfig2(true); err != nil {
panic(err)
}
return rollupApp
}
// RunApp run rollup-test child process by multi parameters. // RunApp run rollup-test child process by multi parameters.
func (b *MockApp) RunApp(t *testing.T, name utils.MockAppName, args ...string) { func (b *MockApp) RunApp(t *testing.T, name utils.MockAppName, args ...string) {
if !(name == utils.EventWatcherApp || if !(name == utils.EventWatcherApp ||
@@ -87,7 +68,7 @@ func (b *MockApp) WaitExit() {
for _, app := range b.mockApps { for _, app := range b.mockApps {
app.WaitExit() app.WaitExit()
} }
b.mockApps = make(map[utils.MockAppName]docker.AppAPI) b.mockApps = make(map[utils.MockAppName]*cmd.Cmd)
} }
// Free stop and release rollup mocked apps. // Free stop and release rollup mocked apps.
@@ -96,35 +77,8 @@ func (b *MockApp) Free() {
_ = os.Remove(b.rollupFile) _ = os.Remove(b.rollupFile)
} }
// MockConfig TODO function will be replaced by MockConfig2 // MockConfig creates a new rollup config.
func (b *MockApp) MockConfig(store bool) error { func (b *MockApp) MockConfig(store bool) error {
base := b.base
// Load origin rollup config file.
cfg, err := config.NewConfig(b.originFile)
if err != nil {
return err
}
cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.DBConfig.DSN = base.DBImg.Endpoint()
b.Config = cfg
if !store {
return nil
}
// Store changed rollup config into a temp file.
data, err := json.Marshal(b.Config)
if err != nil {
return err
}
return os.WriteFile(b.rollupFile, data, 0600)
}
// MockConfig2 creates a new rollup config.
func (b *MockApp) MockConfig2(store bool) error {
// Load origin rollup config file. // Load origin rollup config file.
cfg, err := config.NewConfig(b.originFile) cfg, err := config.NewConfig(b.originFile)
if err != nil { if err != nil {


@@ -115,7 +115,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches) go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions. // Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully") log.Info("Start rollup-relayer successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown. // Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1) interrupt := make(chan os.Signal, 1)

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@@ -0,0 +1,103 @@
package main
import (
"context"
"encoding/hex"
"os"
"strconv"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/database"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv1"
"scroll-tech/rollup/internal/orm"
)
func main() {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
log.Root().SetHandler(glogger)
if len(os.Args) < 2 {
log.Crit("no batch index provided")
return
}
batchIndexStr := os.Args[1]
batchIndexInt, err := strconv.Atoi(batchIndexStr)
if err != nil || batchIndexInt <= 0 {
log.Crit("invalid batch index", "indexStr", batchIndexStr, "err", err)
return
}
batchIndex := uint64(batchIndexInt)
db, err := database.InitDB(&database.Config{
DriverName: "postgres",
DSN: os.Getenv("DB_DSN"),
MaxOpenNum: 200,
MaxIdleNum: 20,
})
if err != nil {
log.Crit("failed to init db", "err", err)
}
defer func() {
if deferErr := database.CloseDB(db); deferErr != nil {
log.Error("failed to close db", "err", err)
}
}()
l2BlockOrm := orm.NewL2Block(db)
chunkOrm := orm.NewChunk(db)
batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex)
if err != nil {
log.Crit("failed to get batch", "index", batchIndex, "err", err)
return
}
dbParentBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex-1)
if err != nil {
log.Crit("failed to get batch", "index", batchIndex-1, "err", err)
return
}
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
if err != nil {
log.Crit("failed to fetch chunks", "err", err)
return
}
chunks := make([]*encoding.Chunk, len(dbChunks))
for i, c := range dbChunks {
blocks, err := l2BlockOrm.GetL2BlocksInRange(context.Background(), c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
log.Crit("failed to fetch blocks", "err", err)
return
}
chunks[i] = &encoding.Chunk{Blocks: blocks}
}
batch := &encoding.Batch{
Index: dbBatch.Index,
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
Chunks: chunks,
}
daBatch, err := codecv1.NewDABatch(batch)
if err != nil {
log.Crit("failed to create DA batch", "err", err)
return
}
blobDataProof, err := daBatch.BlobDataProof()
if err != nil {
log.Crit("failed to get blob data proof", "err", err)
return
}
log.Info("batchMeta", "batchHash", daBatch.Hash().Hex(), "batchDataHash", daBatch.DataHash.Hex(), "blobDataProof", hex.EncodeToString(blobDataProof), "blobData", hex.EncodeToString(daBatch.Blob()[:]))
}
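The script expects the batch index as its only argument (it must be at least 1, since the parent batch at index-1 is also loaded) and reads the Postgres DSN from the DB_DSN environment variable, then recomputes the codecv1 batch hash, data hash, blob data proof, and blob bytes from the chunks stored in the database. A hypothetical invocation, with the DSN, script path, and batch index as placeholders:

DB_DSN="postgres://user:password@localhost:5432/scroll?sslmode=disable" go run <path-to-this-script> 1234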


@@ -585,6 +585,24 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err) log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err)
return err return err
} }
// Update the proving status when finalizing without proof, so that the coordinator can skip assigning this task.
// This is not a required step, so it is not combined into one transaction with UpdateFinalizeTxHashAndRollupStatus.
if !withProof {
txErr := r.db.Transaction(func(tx *gorm.DB) error {
if updateErr := r.batchOrm.UpdateProvingStatus(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
return updateErr
}
if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
return updateErr
}
return nil
})
if txErr != nil {
log.Error("Updating chunk and batch proving status when finalizing without proof failure", "batchHash", dbBatch.Hash, "err", txErr)
}
}
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc() r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
return nil return nil
} }
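For context on the wrapper used above: gorm's Transaction helper commits work performed on the tx handle only when the closure returns nil, and rolls it back otherwise. A generic sketch of that contract; the model, column, and variable names below are illustrative and not taken from this repository:

// Sketch: gorm transaction semantics. Work done through tx is committed only if the
// closure returns nil; returning an error rolls it back.
txErr := db.Transaction(func(tx *gorm.DB) error {
	if err := tx.Model(&Batch{}).
		Where("hash = ?", batchHash). // placeholder hash
		Update("proving_status", int16(types.ProvingTaskVerified)).Error; err != nil {
		return err // rollback
	}
	return nil // commit
})
if txErr != nil {
	log.Error("status update rolled back", "err", txErr)
}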


@@ -7,6 +7,7 @@ import (
"net/http" "net/http"
"strings" "strings"
"testing" "testing"
"time"
"github.com/agiledragon/gomonkey/v2" "github.com/agiledragon/gomonkey/v2"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
@@ -181,9 +182,9 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err) assert.NoError(t, err)
chunkOrm := orm.NewChunk(db) chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion) chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err) assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion) chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err) assert.NoError(t, err)
batch := &encoding.Batch{ batch := &encoding.Batch{
@@ -200,11 +201,30 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted) err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err) assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
assert.NoError(t, err)
// Check the database for the updated status using TryTimes. // Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool { ok := utils.TryTimes(5, func() bool {
relayer.ProcessCommittedBatches() relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash}) time.Sleep(time.Second)
return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
if batchErr != nil {
return false
}
chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
if chunkErr != nil {
return false
}
batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing &&
types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified
chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified
return batchStatus && chunkStatus
}) })
assert.True(t, ok) assert.True(t, ok)
relayer.StopSenders() relayer.StopSenders()


@@ -14,8 +14,8 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"scroll-tech/common/database" "scroll-tech/common/database"
"scroll-tech/common/docker"
dockercompose "scroll-tech/common/docker-compose/l1" dockercompose "scroll-tech/common/docker-compose/l1"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/encoding" "scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0" "scroll-tech/common/types/encoding/codecv0"
@@ -26,7 +26,7 @@ var (
// config // config
cfg *config.Config cfg *config.Config
base *docker.App testApps *testcontainers.TestcontainerApps
posL1TestEnv *dockercompose.PoSL1TestEnv posL1TestEnv *dockercompose.PoSL1TestEnv
// l2geth client // l2geth client
@@ -53,16 +53,25 @@ func setupEnv(t *testing.T) {
cfg, err = config.NewConfig("../../../conf/config.json") cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err) assert.NoError(t, err)
base.RunL2Geth(t) posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
base.RunDBImage(t) assert.NoError(t, err, "failed to create PoS L1 test environment")
assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
assert.NoError(t, testApps.StartL2GethContainer())
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint() cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)
dsn, err := testApps.GetDBEndPoint()
assert.NoError(t, err)
cfg.DBConfig = &database.Config{ cfg.DBConfig = &database.Config{
DSN: base.DBConfig.DSN, DSN: dsn,
DriverName: base.DBConfig.DriverName, DriverName: "postgres",
MaxOpenNum: base.DBConfig.MaxOpenNum, MaxOpenNum: 200,
MaxIdleNum: base.DBConfig.MaxIdleNum, MaxIdleNum: 20,
} }
port, err := rand.Int(rand.Reader, big.NewInt(10000)) port, err := rand.Int(rand.Reader, big.NewInt(10000))
assert.NoError(t, err) assert.NoError(t, err)
@@ -70,7 +79,7 @@ func setupEnv(t *testing.T) {
cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort
// Create l2geth client. // Create l2geth client.
l2Cli, err = base.L2Client() l2Cli, err = testApps.GetL2GethClient()
assert.NoError(t, err) assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json") templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
@@ -97,19 +106,14 @@ func setupEnv(t *testing.T) {
} }
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
base = docker.NewDockerApp() defer func() {
base.Free() if testApps != nil {
testApps.Free()
var err error }
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv() if posL1TestEnv != nil {
if err != nil { posL1TestEnv.Stop()
log.Crit("failed to create PoS L1 test environment", "err", err) }
} }()
if err := posL1TestEnv.Start(); err != nil {
log.Crit("failed to start PoS L1 test environment", "err", err)
}
defer posL1TestEnv.Stop()
m.Run() m.Run()
} }


@@ -26,12 +26,10 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/database/migrate"
"scroll-tech/common/database"
"scroll-tech/common/docker"
dockercompose "scroll-tech/common/docker-compose/l1" dockercompose "scroll-tech/common/docker-compose/l1"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types" "scroll-tech/common/types"
"scroll-tech/database/migrate"
bridgeAbi "scroll-tech/rollup/abi" bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/config" "scroll-tech/rollup/internal/config"
@@ -39,12 +37,10 @@ import (
"scroll-tech/rollup/mock_bridge" "scroll-tech/rollup/mock_bridge"
) )
const TXBatch = 50
var ( var (
privateKey *ecdsa.PrivateKey privateKey *ecdsa.PrivateKey
cfg *config.Config cfg *config.Config
base *docker.App testApps *testcontainers.TestcontainerApps
posL1TestEnv *dockercompose.PoSL1TestEnv posL1TestEnv *dockercompose.PoSL1TestEnv
txTypes = []string{"LegacyTx", "DynamicFeeTx", "DynamicFeeTx"} txTypes = []string{"LegacyTx", "DynamicFeeTx", "DynamicFeeTx"}
txBlob = []*kzg4844.Blob{nil, nil, randBlob()} txBlob = []*kzg4844.Blob{nil, nil, randBlob()}
@@ -54,19 +50,14 @@ var (
) )
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
base = docker.NewDockerApp() defer func() {
defer base.Free() if testApps != nil {
testApps.Free()
var err error }
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv() if posL1TestEnv != nil {
if err != nil { posL1TestEnv.Stop()
log.Crit("failed to create PoS L1 test environment", "err", err) }
} }()
if err := posL1TestEnv.Start(); err != nil {
log.Crit("failed to start PoS L1 test environment", "err", err)
}
defer posL1TestEnv.Stop()
m.Run() m.Run()
} }
@@ -82,17 +73,18 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
privateKey = priv privateKey = priv
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
assert.NoError(t, err, "failed to create PoS L1 test environment")
assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
assert.NoError(t, testApps.StartL1GethContainer())
assert.NoError(t, testApps.StartL2GethContainer())
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
base.RunDBImage(t) db, err = testApps.GetGormDBClient()
db, err = database.InitDB(
&database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
},
)
assert.NoError(t, err) assert.NoError(t, err)
sqlDB, err := db.DB() sqlDB, err := db.DB()
assert.NoError(t, err) assert.NoError(t, err)


@@ -27,7 +27,7 @@ import (
func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) { func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
db := setupDB(t) db := setupDB(t)
client, err := ethclient.Dial(base.L1gethImg.Endpoint()) client, err := testApps.GetL1GethClient()
assert.NoError(t, err) assert.NoError(t, err)
l1Cfg := cfg.L1Config l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil) watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)


@@ -11,9 +11,8 @@ import (
"gorm.io/gorm" "gorm.io/gorm"
"scroll-tech/common/database" "scroll-tech/common/database"
"scroll-tech/common/docker" "scroll-tech/common/testcontainers"
"scroll-tech/common/types/encoding" "scroll-tech/common/types/encoding"
"scroll-tech/database/migrate" "scroll-tech/database/migrate"
"scroll-tech/rollup/internal/config" "scroll-tech/rollup/internal/config"
@@ -23,7 +22,7 @@ var (
// config // config
cfg *config.Config cfg *config.Config
base *docker.App testApps *testcontainers.TestcontainerApps
// l2geth client // l2geth client
l2Cli *ethclient.Client l2Cli *ethclient.Client
@@ -42,19 +41,27 @@ func setupEnv(t *testing.T) (err error) {
cfg, err = config.NewConfig("../../../conf/config.json") cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err) assert.NoError(t, err)
base.RunImages(t) testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
assert.NoError(t, testApps.StartL1GethContainer())
assert.NoError(t, testApps.StartL2GethContainer())
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint() cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL1GethEndPoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint() assert.NoError(t, err)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)
dsn, err := testApps.GetDBEndPoint()
assert.NoError(t, err)
cfg.DBConfig = &database.Config{ cfg.DBConfig = &database.Config{
DSN: base.DBConfig.DSN, DSN: dsn,
DriverName: base.DBConfig.DriverName, DriverName: "postgres",
MaxOpenNum: base.DBConfig.MaxOpenNum, MaxOpenNum: 200,
MaxIdleNum: base.DBConfig.MaxIdleNum, MaxIdleNum: 20,
} }
// Create l2geth client. // Create l2geth client.
l2Cli, err = base.L2Client() l2Cli, err = testApps.GetL2GethClient()
assert.NoError(t, err) assert.NoError(t, err)
block1 = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json") block1 = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
@@ -73,11 +80,12 @@ func setupDB(t *testing.T) *gorm.DB {
} }
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
base = docker.NewDockerApp() defer func() {
if testApps != nil {
testApps.Free()
}
}()
m.Run() m.Run()
base.Free()
} }
func TestFunction(t *testing.T) { func TestFunction(t *testing.T) {


@@ -25,6 +25,7 @@ type Batch struct {
// batch // batch
Index uint64 `json:"index" gorm:"column:index"` Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"` Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"` StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"` StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"` EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -53,6 +54,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"` OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"` OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// blob
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata // metadata
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"` TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"` TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"`
@@ -257,6 +262,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
newBatch := Batch{ newBatch := Batch{
Index: batch.Index, Index: batch.Index,
Hash: batchMeta.BatchHash.Hex(), Hash: batchMeta.BatchHash.Hex(),
DataHash: batchMeta.BatchDataHash.Hex(),
StartChunkHash: batchMeta.StartChunkHash.Hex(), StartChunkHash: batchMeta.StartChunkHash.Hex(),
StartChunkIndex: startChunkIndex, StartChunkIndex: startChunkIndex,
EndChunkHash: batchMeta.EndChunkHash.Hex(), EndChunkHash: batchMeta.EndChunkHash.Hex(),
@@ -271,6 +277,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
OracleStatus: int16(types.GasOraclePending), OracleStatus: int16(types.GasOraclePending),
TotalL1CommitGas: metrics.L1CommitGas, TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize, TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
BlobDataProof: batchMeta.BatchBlobDataProof,
BlobSize: metrics.L1CommitBlobSize,
} }
db := o.db db := o.db
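Each inserted batch row now persists the data hash, blob data proof, and blob size computed at commit time. A hypothetical read-back through the same gorm model; the db handle, context, batch hash, and logging are illustrative assumptions of this sketch:

// Sketch: fetch the stored blob metadata for one batch.
var dbBatch Batch
if err := db.WithContext(ctx).Where("hash = ?", batchHash).First(&dbBatch).Error; err != nil {
	return err
}
log.Info("batch blob metadata",
	"dataHash", dbBatch.DataHash,
	"blobSize", dbBatch.BlobSize,
	"blobDataProofLen", len(dbBatch.BlobDataProof))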


@@ -44,6 +44,10 @@ type Chunk struct {
// batch // batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"` BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// blob
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata // metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"` TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"` TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -140,6 +144,20 @@ func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) (
return chunks, nil return chunks, nil
} }
// GetChunksByBatchHash retrieves chunks by batch hash
// It is only used in tests.
func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash = ?", batchHash)
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetChunksByBatchHash error: %w", err)
}
return chunks, nil
}
// InsertChunk inserts a new chunk into the database. // InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Chunk, error) { func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 { if chunk == nil || len(chunk.Blocks) == 0 {
@@ -198,6 +216,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
ParentChunkStateRoot: parentChunkStateRoot, ParentChunkStateRoot: parentChunkStateRoot,
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(), WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned), ProvingStatus: int16(types.ProvingTaskUnassigned),
CrcMax: metrics.CrcMax,
BlobSize: metrics.L1CommitBlobSize,
} }
db := o.db db := o.db
@@ -242,6 +262,34 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
return nil return nil
} }
// UpdateProvingStatusByBatchHash updates the proving_status for chunks within the specified batch_hash
func (o *Chunk) UpdateProvingStatusByBatchHash(ctx context.Context, batchHash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash = ?", batchHash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatusByBatchHash error: %w, batch hash: %v, status: %v", err, batchHash, status.String())
}
return nil
}
// UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive). // UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end indices. // The range is closed, i.e., it includes both start and end indices.
func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error { func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error {
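Usage sketch (not from this PR): one way the two new helpers could be combined when a batch is finalized without a proof. The function name markBatchChunksVerified is illustrative; only UpdateProvingStatusByBatchHash and GetChunksByBatchHash come from the diff above, and the Index/ProvingStatus fields of Chunk are assumed to exist as in the rest of this model.

    // Illustrative sketch only: flip all chunks of a finalized batch to verified,
    // then read them back as a sanity check.
    func markBatchChunksVerified(ctx context.Context, chunkOrm *Chunk, batchHash string) error {
        if err := chunkOrm.UpdateProvingStatusByBatchHash(ctx, batchHash, types.ProvingTaskVerified); err != nil {
            return fmt.Errorf("failed to update chunk proving status: %w", err)
        }
        chunks, err := chunkOrm.GetChunksByBatchHash(ctx, batchHash)
        if err != nil {
            return fmt.Errorf("failed to load chunks of batch %s: %w", batchHash, err)
        }
        for _, c := range chunks {
            if types.ProvingStatus(c.ProvingStatus) != types.ProvingTaskVerified {
                return fmt.Errorf("chunk %d of batch %s is not verified", c.Index, batchHash)
            }
        }
        return nil
    }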

View File

@@ -14,8 +14,7 @@ import (
     "scroll-tech/database/migrate"
-    "scroll-tech/common/database"
-    "scroll-tech/common/docker"
+    "scroll-tech/common/testcontainers"
     "scroll-tech/common/types"
     "scroll-tech/common/types/encoding"
     "scroll-tech/common/types/encoding/codecv0"
@@ -23,7 +22,7 @@ import (
 )
 var (
-    base       *docker.App
+    testApps   *testcontainers.TestcontainerApps
     db         *gorm.DB
     l2BlockOrm *L2Block
@@ -37,23 +36,23 @@ var (
 func TestMain(m *testing.M) {
     t := &testing.T{}
+    defer func() {
+        if testApps != nil {
+            testApps.Free()
+        }
+        tearDownEnv(t)
+    }()
     setupEnv(t)
-    defer tearDownEnv(t)
     m.Run()
 }
 func setupEnv(t *testing.T) {
-    base = docker.NewDockerApp()
-    base.RunDBImage(t)
     var err error
-    db, err = database.InitDB(
-        &database.Config{
-            DSN:        base.DBConfig.DSN,
-            DriverName: base.DBConfig.DriverName,
-            MaxOpenNum: base.DBConfig.MaxOpenNum,
-            MaxIdleNum: base.DBConfig.MaxIdleNum,
-        },
-    )
+    testApps = testcontainers.NewTestcontainerApps()
+    assert.NoError(t, testApps.StartPostgresContainer())
+    db, err = testApps.GetGormDBClient()
     assert.NoError(t, err)
     sqlDB, err := db.DB()
     assert.NoError(t, err)
@@ -81,7 +80,6 @@ func tearDownEnv(t *testing.T) {
     sqlDB, err := db.DB()
     assert.NoError(t, err)
     sqlDB.Close()
-    base.Free()
 }
 func TestL1BlockOrm(t *testing.T) {
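A small follow-on sketch (not in the diff): with the shared testApps and db handles above, an individual test in this package can reset the schema before seeding data. TestBatchOrmBlobFields is a hypothetical name; db, assert, and migrate.ResetDB are the handles and helpers already imported in this file.

    // Illustrative sketch only: reset the schema via the shared db handle, then
    // seed chunks/batches and assert on the new blob columns.
    func TestBatchOrmBlobFields(t *testing.T) {
        sqlDB, err := db.DB()
        assert.NoError(t, err)
        assert.NoError(t, migrate.ResetDB(sqlDB))
        // seed data here, then assert on blob_data_proof / blob_size / crc_max
    }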
func TestL1BlockOrm(t *testing.T) { func TestL1BlockOrm(t *testing.T) {

View File

@@ -191,10 +191,12 @@ func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, code
 // BatchMetadata represents the metadata of a batch.
 type BatchMetadata struct {
     BatchHash          common.Hash
+    BatchDataHash      common.Hash
+    BatchBlobDataProof []byte
     BatchBytes         []byte
     StartChunkHash     common.Hash
     EndChunkHash       common.Hash
 }
 // GetBatchMetadata retrieves the metadata of a batch.
@@ -212,9 +214,11 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
         return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
     }
+    // BatchBlobDataProof is left as empty for codecv0.
     batchMeta := &BatchMetadata{
         BatchHash:     daBatch.Hash(),
+        BatchDataHash: daBatch.DataHash,
         BatchBytes:    daBatch.Encode(),
     }
     startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
@@ -243,9 +247,16 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
         return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
     }
+    blobDataProof, err := daBatch.BlobDataProof()
+    if err != nil {
+        return nil, fmt.Errorf("failed to get codecv1 blob data proof: %w", err)
+    }
     batchMeta := &BatchMetadata{
         BatchHash:          daBatch.Hash(),
+        BatchDataHash:      daBatch.DataHash,
+        BatchBlobDataProof: blobDataProof,
         BatchBytes:         daBatch.Encode(),
     }
     startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
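The byte layout of the value returned by daBatch.BlobDataProof() is defined in the codecv1 package and is not shown in this diff. Assuming it follows the usual EIP-4844 point-evaluation ordering of challenge, evaluation, KZG commitment, and KZG proof (an assumption, not a statement about codecv1), it could be split for logging or inspection roughly as follows; splitBlobDataProof is a hypothetical helper.

    // Assumed layout (illustrative): z (32) || y (32) || kzg commitment (48) || kzg proof (48).
    // Check the codecv1 package for the authoritative format before relying on this.
    func splitBlobDataProof(proof []byte) (z, y, commitment, kzgProof []byte, err error) {
        const want = 32 + 32 + 48 + 48
        if len(proof) != want {
            return nil, nil, nil, nil, fmt.Errorf("unexpected blob data proof length: got %d, want %d", len(proof), want)
        }
        return proof[:32], proof[32:64], proof[64:112], proof[112:want], nil
    }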

View File

@@ -97,7 +97,7 @@ func setupEnv(t *testing.T) {
     assert.NoError(t, testApps.StartPostgresContainer())
     assert.NoError(t, testApps.StartL1GethContainer())
     assert.NoError(t, testApps.StartL2GethContainer())
-    rollupApp = bcmd.NewRollupApp2(testApps, "../conf/config.json")
+    rollupApp = bcmd.NewRollupApp(testApps, "../conf/config.json")
     l1Client, err = posL1TestEnv.L1Client()
     assert.NoError(t, err)

View File

@@ -20,11 +20,11 @@ var (
     greeterAddress = common.HexToAddress("0x7363726f6c6c6c20000000000000000000000015")
 )
-func TestERC20(t *testing.T) {
-    base.RunL2Geth(t)
+func testERC20(t *testing.T) {
+    assert.NoError(t, testApps.StartL2GethContainer())
     time.Sleep(time.Second * 3)
-    l2Cli, err := base.L2Client()
+    l2Cli, err := testApps.GetL2GethClient()
     assert.Nil(t, err)
     token, err := erc20.NewERC20Mock(erc20Address, l2Cli)
@@ -32,7 +32,9 @@ func TestERC20(t *testing.T) {
     privKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
     assert.NoError(t, err)
-    auth, err := bind.NewKeyedTransactorWithChainID(privKey, base.L2gethImg.ChainID())
+    chainID, err := l2Cli.ChainID(context.Background())
+    assert.NoError(t, err)
+    auth, err := bind.NewKeyedTransactorWithChainID(privKey, chainID)
     assert.NoError(t, err)
     authBls0, err := token.BalanceOf(nil, auth.From)
@@ -45,7 +47,8 @@ func TestERC20(t *testing.T) {
     value := big.NewInt(1000)
     tx, err := token.Transfer(auth, erc20Address, value)
     assert.NoError(t, err)
-    bind.WaitMined(context.Background(), l2Cli, tx)
+    _, err = bind.WaitMined(context.Background(), l2Cli, tx)
+    assert.NoError(t, err)
     authBls1, err := token.BalanceOf(nil, auth.From)
     assert.NoError(t, err)
@@ -58,12 +61,14 @@ func TestERC20(t *testing.T) {
     assert.Equal(t, tokenBls1.Int64(), tokenBls0.Add(tokenBls0, value).Int64())
 }
-func TestGreeter(t *testing.T) {
-    base.RunL2Geth(t)
-    l2Cli, err := base.L2Client()
+func testGreeter(t *testing.T) {
+    assert.NoError(t, testApps.StartL2GethContainer())
+    l2Cli, err := testApps.GetL2GethClient()
     assert.Nil(t, err)
-    auth, err := bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, base.L2gethImg.ChainID())
+    chainID, err := l2Cli.ChainID(context.Background())
+    assert.NoError(t, err)
+    auth, err := bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, chainID)
     assert.NoError(t, err)
     token, err := greeter.NewGreeter(greeterAddress, l2Cli)
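A possible further tightening, not part of this change: since WaitMined now has its error checked, the returned receipt could also be asserted so a reverted transfer fails the test immediately. This assumes the go-ethereum core/types package is available under the gethTypes alias used elsewhere in these tests.

    // Illustrative sketch only: assert both the wait error and the receipt status.
    receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
    assert.NoError(t, err)
    assert.NotNil(t, receipt)
    assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)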

View File

@@ -10,52 +10,63 @@ import (
     "github.com/scroll-tech/go-ethereum/common"
     gethTypes "github.com/scroll-tech/go-ethereum/core/types"
     "github.com/stretchr/testify/assert"
+    "gorm.io/gorm"
-    "scroll-tech/integration-test/orm"
-    rapp "scroll-tech/prover/cmd/app"
-    "scroll-tech/database/migrate"
-    capp "scroll-tech/coordinator/cmd/api/app"
-    "scroll-tech/common/database"
-    "scroll-tech/common/docker"
+    "scroll-tech/common/testcontainers"
     "scroll-tech/common/types/encoding"
     "scroll-tech/common/utils"
     "scroll-tech/common/version"
+    capp "scroll-tech/coordinator/cmd/api/app"
+    "scroll-tech/database/migrate"
+    "scroll-tech/integration-test/orm"
+    rapp "scroll-tech/prover/cmd/app"
     bcmd "scroll-tech/rollup/cmd"
 )
 var (
-    base      *docker.App
+    testApps  *testcontainers.TestcontainerApps
     rollupApp *bcmd.MockApp
 )
 func TestMain(m *testing.M) {
-    base = docker.NewDockerApp()
-    rollupApp = bcmd.NewRollupApp(base, "../../rollup/conf/config.json")
+    defer func() {
+        if testApps != nil {
+            testApps.Free()
+        }
+        if rollupApp != nil {
+            rollupApp.Free()
+        }
+    }()
     m.Run()
-    rollupApp.Free()
-    base.Free()
 }
-func TestCoordinatorProverInteraction(t *testing.T) {
-    // Start postgres docker containers
-    base.RunL2Geth(t)
-    base.RunDBImage(t)
-    // Init data
-    dbCfg := &database.Config{
-        DSN:        base.DBConfig.DSN,
-        DriverName: base.DBConfig.DriverName,
-        MaxOpenNum: base.DBConfig.MaxOpenNum,
-        MaxIdleNum: base.DBConfig.MaxIdleNum,
-    }
-    db, err := database.InitDB(dbCfg)
+func setupEnv(t *testing.T) {
+    testApps = testcontainers.NewTestcontainerApps()
+    assert.NoError(t, testApps.StartPostgresContainer())
+    assert.NoError(t, testApps.StartL1GethContainer())
+    assert.NoError(t, testApps.StartL2GethContainer())
+    rollupApp = bcmd.NewRollupApp(testApps, "../../rollup/conf/config.json")
+}
+func TestFunction(t *testing.T) {
+    setupEnv(t)
+    t.Run("TestCoordinatorProverInteraction", testCoordinatorProverInteraction)
+    t.Run("TestProverReLogin", testProverReLogin)
+    t.Run("TestERC20", testERC20)
+    t.Run("TestGreeter", testGreeter)
+}
+func setupDB(t *testing.T) *gorm.DB {
+    db, err := testApps.GetGormDBClient()
     assert.NoError(t, err)
+    sqlDB, err := db.DB()
+    assert.NoError(t, err)
+    assert.NoError(t, migrate.ResetDB(sqlDB))
+    return db
+}
+func testCoordinatorProverInteraction(t *testing.T) {
+    db := setupDB(t)
     sqlDB, err := db.DB()
     assert.NoError(t, err)
@@ -66,7 +77,7 @@ func TestCoordinatorProverInteraction(t *testing.T) {
     l2BlockOrm := orm.NewL2Block(db)
     // Connect to l2geth client
-    l2Client, err := base.L2Client()
+    l2Client, err := testApps.GetL2GethClient()
     if err != nil {
         log.Fatalf("Failed to connect to the l2geth client: %v", err)
     }
@@ -111,10 +122,9 @@ func TestCoordinatorProverInteraction(t *testing.T) {
     assert.NoError(t, err)
     t.Log(version.Version)
-    base.Timestamp = time.Now().Nanosecond()
-    coordinatorApp := capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json", "./genesis.json")
-    chunkProverApp := rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
-    batchProverApp := rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
+    coordinatorApp := capp.NewCoordinatorApp(testApps, "../../coordinator/conf/config.json", "./genesis.json")
+    chunkProverApp := rapp.NewProverApp(testApps, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
+    batchProverApp := rapp.NewProverApp(testApps, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
     defer coordinatorApp.Free()
     defer chunkProverApp.Free()
     defer batchProverApp.Free()
@@ -139,17 +149,16 @@ func TestCoordinatorProverInteraction(t *testing.T) {
     coordinatorApp.WaitExit()
 }
-func TestProverReLogin(t *testing.T) {
-    // Start postgres docker containers.
-    base.RunL2Geth(t)
-    base.RunDBImage(t)
-    assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
-    base.Timestamp = time.Now().Nanosecond()
-    coordinatorApp := capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json", "./genesis.json")
-    chunkProverApp := rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
-    batchProverApp := rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
+func testProverReLogin(t *testing.T) {
+    client, err := testApps.GetGormDBClient()
+    assert.NoError(t, err)
+    db, err := client.DB()
+    assert.NoError(t, err)
+    assert.NoError(t, migrate.ResetDB(db))
+    coordinatorApp := capp.NewCoordinatorApp(testApps, "../../coordinator/conf/config.json", "./genesis.json")
+    chunkProverApp := rapp.NewProverApp(testApps, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
+    batchProverApp := rapp.NewProverApp(testApps, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
     defer coordinatorApp.Free()
     defer chunkProverApp.Free()
    defer batchProverApp.Free()
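One consequence of the TestFunction / t.Run grouping above is that all scenarios share the containers started once by setupEnv, so any subtest that touches the database should go through setupDB (or an equivalent reset) rather than inherit state from an earlier subtest. A sketch of how an additional scenario would be wired in; testCoordinatorRestart is a purely hypothetical name.

    // Illustrative sketch only: register the new scenario in TestFunction as
    // t.Run("TestCoordinatorRestart", testCoordinatorRestart).
    func testCoordinatorRestart(t *testing.T) {
        db := setupDB(t) // fresh schema, same shared containers
        _ = db
        // seed data, start coordinator/prover apps, and assert restart behaviour here
    }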

View File

@@ -21,6 +21,7 @@ type Batch struct {
     // batch
     Index           uint64 `json:"index" gorm:"column:index"`
     Hash            string `json:"hash" gorm:"column:hash"`
+    DataHash        string `json:"data_hash" gorm:"column:data_hash"`
     StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
     StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
     EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -49,6 +50,10 @@ type Batch struct {
     OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
     OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
+    // blob
+    BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+    BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`
     // metadata
     CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
     UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -151,6 +156,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
     newBatch := Batch{
         Index:           batch.Index,
         Hash:            daBatch.Hash().Hex(),
+        DataHash:        daBatch.DataHash.Hex(),
         StartChunkHash:  startDAChunkHash.Hex(),
         StartChunkIndex: startChunkIndex,
         EndChunkHash:    endDAChunkHash.Hex(),
@@ -163,6 +169,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
         ProvingStatus: int16(types.ProvingTaskUnassigned),
         RollupStatus:  int16(types.RollupPending),
         OracleStatus:  int16(types.GasOraclePending),
+        BlobDataProof: nil, // mock value: this code path is only used in unit tests
+        BlobSize:      0,   // mock value: this code path is only used in unit tests
     }
     db := o.db

View File

@@ -43,6 +43,10 @@ type Chunk struct {
     // batch
     BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
+    // blob
+    CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
+    BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
     // metadata
     TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
     TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -150,6 +154,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
         ParentChunkStateRoot: parentChunkStateRoot,
         WithdrawRoot:         chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
         ProvingStatus:        int16(types.ProvingTaskUnassigned),
+        CrcMax:               0, // mock value: this code path is only used in unit tests
+        BlobSize:             0, // mock value: this code path is only used in unit tests
     }
     db := o.db