Compare commits

...

32 Commits

Author SHA1 Message Date
HAOYUatHZ
2cccdf7003 Merge branch 'develop' into env1-sampling 2024-04-13 13:10:55 +08:00
HAOYUatHZ
6cc1f424d7 revert hongfan 2024-04-13 09:24:45 +08:00
HAOYUatHZ
5b827c3c18 feat(db): add batch_data_hash & blob metadata (#1221)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-12 15:45:06 +08:00
georgehao
6b2eb80aa5 feat: print version info on service startup (#1268)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-12 15:09:26 +08:00
HAOYUatHZ
af8437aa9d Merge branch 'develop' into env1-sampling 2024-04-12 11:54:20 +08:00
Péter Garamvölgyi
71f88b04f5 fix: add blobHash to challenge and piHash (#1264)
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2024-04-12 11:00:37 +08:00
Zhang Zhuo
bcd9764bcd chore(libzkp): upgrade to v0.10.3 (#1267)
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2024-04-12 10:51:39 +08:00
georgehao
b4f8377a08 fix(coordinator): fix coordinator recover public key inconsistent (#1265)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2024-04-12 07:19:31 +08:00
HAOYUatHZ
133547c91c Merge branch 'develop' into env1-sampling 2024-04-11 19:45:51 +08:00
Zhang Zhuo
b52d43caa8 chore(libzkp): upgrade to v0.10.2 (#1257)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
Co-authored-by: Thegaram <Thegaram@users.noreply.github.com>
2024-04-11 13:44:09 +08:00
Mengran Lan
201bf401cd feat(coordinator): add new metric get_task_counter tracking prover info (#1235)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-11 12:47:03 +08:00
georgehao
898ac1d25c feat: update batch/chunk proving status when finalize without proof (#1255)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-04-11 11:15:09 +08:00
Snoppy
1336b89fb8 chore: fix typos (#1244)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-04-11 11:02:32 +08:00
Zhang Zhuo
73045df037 feat(coordinator): support multiple batch verifier versions (#1249)
Co-authored-by: georgehao <haohongfan@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2024-04-11 10:55:58 +08:00
Sina Pilehchiha
b3093e9eb6 fix: Implemented fixes for 4844 support fix review. (#1256) 2024-04-10 06:05:29 -04:00
colin
3d5250e52d feat(bridge-history): add puffer gateways (#1252)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-04-09 19:34:03 +08:00
HAOYUatHZ
b7324c76bc fix(test,verifier): fix TestFFI (#1250) 2024-04-09 16:47:43 +08:00
HAOYUatHZ
fcbf83bf3d fix 2024-04-09 09:29:42 +08:00
HAOYUatHZ
330fde44a9 fix 2024-04-09 08:59:04 +08:00
HAOYUatHZ
122cffe489 implement 2024-04-08 22:24:58 +08:00
HAOYUatHZ
b4dac7ab82 init config 2024-04-08 22:10:53 +08:00
Péter Garamvölgyi
6d6e98bd6e test(codecv1): add zkEVM standard challenge digest test vectors (#1213)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: Rohit Narurkar <rohit.narurkar@proton.me>
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2024-04-08 11:31:41 +08:00
JayLiu
9e35ce0ab4 test: add testcontainers (#1248) 2024-04-08 00:10:14 +08:00
colin
b86ebaefaf fix(scroll): bump version (#1246) 2024-04-07 14:26:59 +08:00
JayLiu
78a4298eda test: add testcontainers (#1229)
Co-authored-by: liuyuecai <liuyuecai@360.cn>
2024-04-07 14:26:06 +08:00
Zhang Zhuo
49d8387714 chore: upgrade libzkp to v0.10.0rc3, fix sha256 rows (#1245) 2024-04-07 14:20:51 +08:00
georgehao
af2913903b feat(coordinator): optimize get_task sql (#1228)
Co-authored-by: georgehao <georgehao@users.noreply.github.com>
2024-04-03 12:01:48 +08:00
Xi Lin
f8a7d70872 fix(contracts): fix number of non-skipped l1 messages (#1232)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2024-04-03 11:10:17 +08:00
Zhang Zhuo
790fc44b40 chore(libzkp): upgrade to v0.10.0l, fix keccak overflow (#1231)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2024-04-03 10:54:46 +08:00
Zhang Zhuo
620c71b16d fix(proving): use ChunkInfo embedded inside ChunkProof for tx_bytes (#1230)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2024-04-03 09:24:06 +08:00
Zhang Zhuo
ed0e0e4c18 feat(libzkp): upgrade to v0.10.0k to support blob DA (#1212)
Co-authored-by: kunxian-xia <xiakunxian130@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2024-04-02 10:09:39 +08:00
JayLiu
d203033e13 test: add testcontainers (#1217)
Co-authored-by: liuyuecai <liuyuecai@360.cn>
2024-04-02 00:52:57 +08:00
94 changed files with 2415 additions and 1688 deletions

View File

@@ -60,6 +60,7 @@ require (
github.com/holiman/uint256 v1.2.4 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.15 // indirect
github.com/jackc/pgx/v5 v5.5.4 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect

View File

@@ -184,8 +184,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=

View File

@@ -23,6 +23,7 @@ type FetcherConfig struct {
DAIGatewayAddr string `json:"DAIGatewayAddr"`
USDCGatewayAddr string `json:"USDCGatewayAddr"`
LIDOGatewayAddr string `json:"LIDOGatewayAddr"`
PufferGatewayAddr string `json:"PufferGatewayAddr"`
ERC721GatewayAddr string `json:"ERC721GatewayAddr"`
ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
ScrollChainAddr string `json:"ScrollChainAddr"`
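
The new PufferGatewayAddr entry slots into the bridge-history fetcher config alongside the existing gateway addresses. A rough sketch of how the field might be populated (struct and field names come from the diff above; the addresses are placeholders, not real deployments):

// Illustrative only: wiring the new Puffer gateway into the fetcher config.
cfg := &config.FetcherConfig{
	USDCGatewayAddr:   "0x1111111111111111111111111111111111111111", // placeholder
	LIDOGatewayAddr:   "0x2222222222222222222222222222222222222222", // placeholder
	PufferGatewayAddr: "0x3333333333333333333333333333333333333333", // placeholder
}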

View File

@@ -93,6 +93,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
f := &L1FetcherLogic{

View File

@@ -85,7 +85,12 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
}
if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
}
log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)

View File

@@ -1,62 +1,27 @@
package database
package database_test
import (
"context"
"errors"
"io"
"os"
"testing"
"time"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/scroll-tech/go-ethereum/log"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/common/database"
"scroll-tech/common/testcontainers"
"scroll-tech/common/version"
)
func TestGormLogger(t *testing.T) {
output := io.Writer(os.Stderr)
usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
if usecolor {
output = colorable.NewColorableStderr()
}
ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
glogger := log.NewGlogHandler(ostream)
// Set log level
glogger.Verbosity(log.LvlTrace)
log.Root().SetHandler(glogger)
var gl gormLogger
gl.gethLogger = log.Root()
gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
}
func TestDB(t *testing.T) {
version.Version = "v4.1.98-aaa-bbb-ccc"
base := docker.NewDockerApp()
base.RunDBImage(t)
dbCfg := &Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
testApps := testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
var err error
db, err := InitDB(dbCfg)
db, err := testApps.GetGormDBClient()
assert.NoError(t, err)
sqlDB, err := Ping(db)
sqlDB, err := database.Ping(db)
assert.NoError(t, err)
assert.NotNil(t, sqlDB)
assert.NoError(t, CloseDB(db))
assert.NoError(t, database.CloseDB(db))
}
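
The rewritten test drops the hand-rolled docker setup and asks the shared testcontainers helper for a ready gorm client. For a whole package, the same helper can back a TestMain so every test reuses one postgres container; this wiring is our sketch under that assumption, not code from the PR:

package database_test

import (
	"os"
	"testing"

	"scroll-tech/common/testcontainers"
)

// testApps is a package-level handle shared by all tests (our assumption).
var testApps *testcontainers.TestcontainerApps

func TestMain(m *testing.M) {
	testApps = testcontainers.NewTestcontainerApps()
	code := m.Run()
	testApps.Free() // terminate any containers the tests started
	os.Exit(code)
}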

View File

@@ -0,0 +1,35 @@
package database
import (
"context"
"errors"
"io"
"os"
"testing"
"time"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/scroll-tech/go-ethereum/log"
)
func TestGormLogger(t *testing.T) {
output := io.Writer(os.Stderr)
usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
if usecolor {
output = colorable.NewColorableStderr()
}
ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
glogger := log.NewGlogHandler(ostream)
// Set log level
glogger.Verbosity(log.LvlTrace)
log.Root().SetHandler(glogger)
var gl gormLogger
gl.gethLogger = log.Root()
gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
}

View File

@@ -1,196 +0,0 @@
package docker
import (
"crypto/rand"
"database/sql"
"encoding/json"
"fmt"
"math/big"
"os"
"testing"
"time"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/common/utils"
)
var (
l1StartPort = 10000
l2StartPort = 20000
dbStartPort = 30000
)
// AppAPI app interface.
type AppAPI interface {
IsRunning() bool
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
RunApp(waitResult func() bool)
WaitExit()
ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
}
// App is a collection struct of runtime docker images
type App struct {
L1gethImg GethImgInstance
L2gethImg GethImgInstance
DBImg ImgInstance
dbClient *sql.DB
DBConfig *database.DBConfig
DBConfigFile string
// common time stamp.
Timestamp int
}
// NewDockerApp returns a new instance of the dockerApp struct
func NewDockerApp() *App {
timestamp := time.Now().Nanosecond()
app := &App{
Timestamp: timestamp,
L1gethImg: newTestL1Docker(),
L2gethImg: newTestL2Docker(),
DBImg: newTestDBDocker("postgres"),
DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
}
if err := app.mockDBConfig(); err != nil {
panic(err)
}
return app
}
// RunImages runs all images together
func (b *App) RunImages(t *testing.T) {
b.RunDBImage(t)
b.RunL1Geth(t)
b.RunL2Geth(t)
}
// RunDBImage starts postgres docker container.
func (b *App) RunDBImage(t *testing.T) {
if b.DBImg.IsRunning() {
return
}
assert.NoError(t, b.DBImg.Start())
// try up to 10 times until the db is ready.
ok := utils.TryTimes(10, func() bool {
db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
return err == nil && db != nil && db.Ping() == nil
})
assert.True(t, ok)
}
// Free clears all running images, double checks, and recycles the docker containers.
func (b *App) Free() {
if b.L1gethImg.IsRunning() {
_ = b.L1gethImg.Stop()
}
if b.L2gethImg.IsRunning() {
_ = b.L2gethImg.Stop()
}
if b.DBImg.IsRunning() {
_ = b.DBImg.Stop()
_ = os.Remove(b.DBConfigFile)
if !utils.IsNil(b.dbClient) {
_ = b.dbClient.Close()
b.dbClient = nil
}
}
}
// RunL1Geth starts l1geth docker container.
func (b *App) RunL1Geth(t *testing.T) {
if b.L1gethImg.IsRunning() {
return
}
assert.NoError(t, b.L1gethImg.Start())
}
// L1Client returns an ethclient by dialing the running l1geth
func (b *App) L1Client() (*ethclient.Client, error) {
if utils.IsNil(b.L1gethImg) {
return nil, fmt.Errorf("l1 geth is not running")
}
client, err := ethclient.Dial(b.L1gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
// RunL2Geth starts l2geth docker container.
func (b *App) RunL2Geth(t *testing.T) {
if b.L2gethImg.IsRunning() {
return
}
assert.NoError(t, b.L2gethImg.Start())
}
// L2Client returns an ethclient by dialing the running l2geth
func (b *App) L2Client() (*ethclient.Client, error) {
if utils.IsNil(b.L2gethImg) {
return nil, fmt.Errorf("l2 geth is not running")
}
client, err := ethclient.Dial(b.L2gethImg.Endpoint())
if err != nil {
return nil, err
}
return client, nil
}
// DBClient creates and returns a *sql.DB instance.
func (b *App) DBClient(t *testing.T) *sql.DB {
if !utils.IsNil(b.dbClient) {
return b.dbClient
}
var (
cfg = b.DBConfig
err error
)
b.dbClient, err = sql.Open(cfg.DriverName, cfg.DSN)
assert.NoError(t, err)
b.dbClient.SetMaxOpenConns(cfg.MaxOpenNum)
b.dbClient.SetMaxIdleConns(cfg.MaxIdleNum)
assert.NoError(t, b.dbClient.Ping())
return b.dbClient
}
func (b *App) mockDBConfig() error {
b.DBConfig = &database.DBConfig{
DSN: "",
DriverName: "postgres",
MaxOpenNum: 200,
MaxIdleNum: 20,
}
if b.DBImg != nil {
b.DBConfig.DSN = b.DBImg.Endpoint()
}
data, err := json.Marshal(b.DBConfig)
if err != nil {
return err
}
return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
}
func newTestL1Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
}
func newTestL2Docker() GethImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
}
func newTestDBDocker(driverName string) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
return NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
}

View File

@@ -1,131 +0,0 @@
package docker
import (
"context"
"fmt"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgDB the postgres image manager.
type ImgDB struct {
image string
name string
id string
dbName string
port int
password string
running bool
cmd *cmd.Cmd
}
// NewImgDB returns a postgres db img instance.
func NewImgDB(image, password, dbName string, port int) ImgInstance {
img := &ImgDB{
image: image,
name: fmt.Sprintf("%s-%s_%d", image, dbName, port),
password: password,
dbName: dbName,
port: port,
}
img.cmd = cmd.NewCmd("docker", img.prepare()...)
return img
}
// Start postgres db container.
func (i *ImgDB) Start() error {
id := GetContainerID(i.name)
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.running = i.isOk()
if !i.running {
_ = i.Stop()
return fmt.Errorf("failed to start image: %s", i.image)
}
return nil
}
// Stop the container.
func (i *ImgDB) Stop() error {
if !i.running {
return nil
}
i.running = false
ctx := context.Background()
// stop the running container.
if i.id == "" {
i.id = GetContainerID(i.name)
}
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
return err
}
// remove the stopped container.
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
}
// Endpoint returns the dsn.
func (i *ImgDB) Endpoint() string {
return fmt.Sprintf("postgres://postgres:%s@localhost:%d/%s?sslmode=disable", i.password, i.port, i.dbName)
}
// IsRunning returns docker container's running status.
func (i *ImgDB) IsRunning() bool {
return i.running
}
func (i *ImgDB) prepare() []string {
cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
envs := []string{
"-e", "POSTGRES_PASSWORD=" + i.password,
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
}
cmd = append(cmd, envs...)
return append(cmd, i.image)
}
func (i *ImgDB) isOk() bool {
keyword := "database system is ready to accept connections"
okCh := make(chan struct{}, 1)
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer i.cmd.UnRegistFunc(keyword)
// Start cmd in parallel.
i.cmd.RunCmd(true)
select {
case <-okCh:
utils.TryTimes(20, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
case err := <-i.cmd.ErrChan:
if err != nil {
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
}
case <-time.After(time.Second * 20):
return false
}
return i.id != ""
}

View File

@@ -1,174 +0,0 @@
package docker
import (
"context"
"fmt"
"math/big"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/scroll-tech/go-ethereum/ethclient"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgGeth is the geth image manager covering both l1geth and l2geth.
type ImgGeth struct {
image string
name string
id string
volume string
ipcPath string
httpPort int
wsPort int
chainID *big.Int
running bool
cmd *cmd.Cmd
}
// NewImgGeth returns a geth img instance.
func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
img := &ImgGeth{
image: image,
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
volume: volume,
ipcPath: ipc,
httpPort: hPort,
wsPort: wPort,
}
img.cmd = cmd.NewCmd("docker", img.params()...)
return img
}
// Start runs the image and checks that it is running healthily.
func (i *ImgGeth) Start() error {
id := GetContainerID(i.name)
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.running = i.isOk()
if !i.running {
_ = i.Stop()
return fmt.Errorf("failed to start image: %s", i.image)
}
// try up to 10 times to get the chainID until it is ok.
utils.TryTimes(10, func() bool {
client, err := ethclient.Dial(i.Endpoint())
if err == nil && client != nil {
i.chainID, err = client.ChainID(context.Background())
return err == nil && i.chainID != nil
}
return false
})
return nil
}
// IsRunning returns docker container's running status.
func (i *ImgGeth) IsRunning() bool {
return i.running
}
// Endpoint returns the connection endpoint.
func (i *ImgGeth) Endpoint() string {
switch true {
case i.httpPort != 0:
return fmt.Sprintf("http://127.0.0.1:%d", i.httpPort)
case i.wsPort != 0:
return fmt.Sprintf("ws://127.0.0.1:%d", i.wsPort)
default:
return i.ipcPath
}
}
// ChainID return chainID.
func (i *ImgGeth) ChainID() *big.Int {
return i.chainID
}
func (i *ImgGeth) isOk() bool {
keyword := "WebSocket enabled"
okCh := make(chan struct{}, 1)
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer i.cmd.UnRegistFunc(keyword)
// Start cmd in parallel.
i.cmd.RunCmd(true)
select {
case <-okCh:
utils.TryTimes(20, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
case err := <-i.cmd.ErrChan:
if err != nil {
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
}
case <-time.After(time.Second * 10):
return false
}
return i.id != ""
}
// Stop the docker container.
func (i *ImgGeth) Stop() error {
if !i.running {
return nil
}
i.running = false
ctx := context.Background()
// check if container is running, stop the running container.
id := GetContainerID(i.name)
if id != "" {
timeoutSec := 3
timeout := container.StopOptions{
Timeout: &timeoutSec,
}
if err := cli.ContainerStop(ctx, id, timeout); err != nil {
return err
}
i.id = id
}
// remove the stopped container.
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
}
func (i *ImgGeth) params() []string {
cmds := []string{"run", "--rm", "--name", i.name}
var ports []string
if i.httpPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)
}
if i.wsPort != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(i.wsPort) + ":8546"}...)
}
var envs []string
if i.ipcPath != "" {
envs = append(envs, []string{"-e", fmt.Sprintf("IPC_PATH=%s", i.ipcPath)}...)
}
if i.volume != "" {
cmds = append(cmds, []string{"-v", fmt.Sprintf("%s:%s", i.volume, i.volume)}...)
}
cmds = append(cmds, ports...)
cmds = append(cmds, envs...)
return append(cmds, i.image)
}

View File

@@ -1,54 +0,0 @@
package docker_test
import (
"context"
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
)
var (
base *docker.App
)
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
m.Run()
base.Free()
}
func TestDB(t *testing.T) {
base.RunDBImage(t)
db, err := sqlx.Open("postgres", base.DBImg.Endpoint())
assert.NoError(t, err)
assert.NoError(t, db.Ping())
}
func TestL1Geth(t *testing.T) {
base.RunL1Geth(t)
client, err := base.L1Client()
assert.NoError(t, err)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}
func TestL2Geth(t *testing.T) {
base.RunL2Geth(t)
client, err := base.L2Client()
assert.NoError(t, err)
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
t.Logf("chainId: %s", chainID.String())
}

View File

@@ -9,8 +9,6 @@ require (
github.com/docker/docker v25.0.3+incompatible
github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/gin v1.9.1
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.9
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
github.com/modern-go/reflect2 v1.0.2
@@ -18,8 +16,9 @@ require (
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
github.com/stretchr/testify v1.9.0
github.com/testcontainers/testcontainers-go v0.29.1
github.com/testcontainers/testcontainers-go/modules/compose v0.29.1
github.com/testcontainers/testcontainers-go v0.28.0
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0
github.com/urfave/cli/v2 v2.25.7
gorm.io/driver/postgres v1.5.0
gorm.io/gorm v1.25.5
@@ -127,7 +126,7 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.5.0 // indirect
github.com/jackc/pgx/v5 v5.5.4 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
@@ -144,7 +143,6 @@ require (
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect

View File

@@ -268,7 +268,6 @@ github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXS
github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24=
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -382,8 +381,8 @@ github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5ey
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
@@ -400,8 +399,6 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -443,7 +440,6 @@ github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ic
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
@@ -469,9 +465,6 @@ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -678,10 +671,12 @@ github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZ
github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/testcontainers/testcontainers-go v0.29.1 h1:z8kxdFlovA2y97RWx98v/TQ+tR+SXZm6p35M+xB92zk=
github.com/testcontainers/testcontainers-go v0.29.1/go.mod h1:SnKnKQav8UcgtKqjp/AD8bE1MqZm+3TDb/B8crE3XnI=
github.com/testcontainers/testcontainers-go/modules/compose v0.29.1 h1:47ipPM+s+ltCDOP3Sa1j95AkNb+z+WGiHLDbLU8ixuc=
github.com/testcontainers/testcontainers-go/modules/compose v0.29.1/go.mod h1:Sqh+Ef2ESdbJQjTJl57UOkEHkOc7gXvQLg1b5xh6f1Y=
github.com/testcontainers/testcontainers-go v0.28.0 h1:1HLm9qm+J5VikzFDYhOd+Zw12NtOl+8drH2E8nTY1r8=
github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0 h1:QOCeTYZIYixg796Ik60MOaeMgpAKPbQd5pJOdTrftyg=
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0 h1:ff0s4JdYIdNAVSi/SrpN2Pdt1f+IjIw3AKjbHau8Un4=
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=

File diff suppressed because it is too large

View File

@@ -8,26 +8,30 @@ edition = "2021"
crate-type = ["cdylib"]
[patch.crates-io]
gobuild = { git = "https://github.com/scroll-tech/gobuild.git" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
#ethers-etherscan = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
#ethers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "scroll-dev-0220" }
[patch."https://github.com/privacy-scaling-explorations/halo2wrong.git"]
halo2wrong = { git = "https://github.com/scroll-tech/halo2wrong.git", branch = "halo2-ecc-snark-verifier-0323" }
maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-ecc-snark-verifier-0323" }
[patch."https://github.com/privacy-scaling-explorations/halo2curves.git"]
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
[dependencies]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.9", default-features = false, features = ["parallel_syn", "scroll", "shanghai", "strict-ccc"] }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
base64 = "0.13.0"
env_logger = "0.9.0"
libc = "0.2"
log = "0.4"
once_cell = "1.8.0"
once_cell = "1.19"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"

View File

@@ -1 +1 @@
nightly-2022-12-10
nightly-2023-12-03

View File

@@ -12,6 +12,7 @@ use prover::{
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, BlockTrace, ChunkHash, ChunkProof,
};
use snark_verifier_sdk::verify_evm_calldata;
use std::{cell::OnceCell, env, ptr::null};
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -119,7 +120,7 @@ pub unsafe extern "C" fn gen_batch_proof(
let chunk_hashes_proofs = chunk_hashes
.into_iter()
.zip(chunk_proofs.into_iter())
.zip(chunk_proofs)
.collect();
let proof = PROVER
@@ -148,11 +149,33 @@ pub unsafe extern "C" fn gen_batch_proof(
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char {
pub unsafe extern "C" fn verify_batch_proof(
proof: *const c_char,
fork_name: *const c_char,
) -> c_char {
let proof = c_char_to_vec(proof);
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
let fork_name_str = c_char_to_str(fork_name);
let fork_id = match fork_name_str {
"" => 0,
"shanghai" => 0,
"bernoulli" => 1,
_ => {
log::warn!("unexpected fork_name {fork_name_str}, treated as bernoulli");
1
}
};
let verified = panic_catch(|| {
if fork_id == 0 {
// before upgrade#2(EIP4844)
verify_evm_calldata(
include_bytes!("evm_verifier_fork_1.bin").to_vec(),
proof.calldata(),
)
} else {
VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
}
});
verified.unwrap_or(false) as c_char
}
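
verify_batch_proof now takes the hard-fork name and dispatches on it: proofs from before upgrade #2 (EIP-4844), i.e. fork_id 0, are verified against the embedded fork-1 EVM verifier calldata, while newer proofs go through the loaded aggregation verifier. On the Go side, the exported C symbol (see the libzkp.h diff below) could be wrapped roughly as follows; this cgo wrapper is our sketch, not code from this PR:

package libzkp

/*
#include <stdlib.h>
// Signature from libzkp.h:
char verify_batch_proof(char* proof, char* fork_name);
*/
import "C"
import "unsafe"

// VerifyBatchProof is an illustrative wrapper around the new two-argument API.
func VerifyBatchProof(proofJSON, forkName string) bool {
	cProof := C.CString(proofJSON)
	cFork := C.CString(forkName) // "" or "shanghai" -> fork_id 0; "bernoulli" or unknown -> 1
	defer C.free(unsafe.Pointer(cProof))
	defer C.free(unsafe.Pointer(cFork))
	return C.verify_batch_proof(cProof, cFork) != 0
}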

Binary file not shown.

View File

@@ -1,5 +1,3 @@
#![feature(once_cell)]
mod batch;
mod chunk;
mod types;

View File

@@ -3,7 +3,7 @@ void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char* check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof);
char verify_batch_proof(char* proof, char* fork_name);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);

View File

@@ -0,0 +1,194 @@
package testcontainers
import (
"context"
"fmt"
"log"
"time"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/modules/postgres"
"github.com/testcontainers/testcontainers-go/wait"
"gorm.io/gorm"
"scroll-tech/common/database"
)
// TestcontainerApps is the collection struct of testcontainers.
type TestcontainerApps struct {
postgresContainer *postgres.PostgresContainer
l1GethContainer *testcontainers.DockerContainer
l2GethContainer *testcontainers.DockerContainer
// common time stamp in nanoseconds.
Timestamp int
}
// NewTestcontainerApps returns a new instance of the TestcontainerApps struct
func NewTestcontainerApps() *TestcontainerApps {
timestamp := time.Now().Nanosecond()
return &TestcontainerApps{
Timestamp: timestamp,
}
}
// StartPostgresContainer starts a postgres container
func (t *TestcontainerApps) StartPostgresContainer() error {
if t.postgresContainer != nil && t.postgresContainer.IsRunning() {
return nil
}
postgresContainer, err := postgres.RunContainer(context.Background(),
testcontainers.WithImage("postgres"),
postgres.WithDatabase("test_db"),
postgres.WithPassword("123456"),
testcontainers.WithWaitStrategy(
wait.ForLog("database system is ready to accept connections").WithOccurrence(2).WithStartupTimeout(5*time.Second)),
)
if err != nil {
log.Printf("failed to start postgres container: %s", err)
return err
}
t.postgresContainer = postgresContainer
return nil
}
// StartL1GethContainer starts an L1Geth container
func (t *TestcontainerApps) StartL1GethContainer() error {
if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
return nil
}
req := testcontainers.ContainerRequest{
Image: "scroll_l1geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
Cmd: []string{"--log.debug", "ANY"},
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
}
container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
if err != nil {
log.Printf("failed to start scroll_l1geth container: %s", err)
return err
}
t.l1GethContainer, _ = container.(*testcontainers.DockerContainer)
return nil
}
// StartL2GethContainer starts an L2Geth container
func (t *TestcontainerApps) StartL2GethContainer() error {
if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
return nil
}
req := testcontainers.ContainerRequest{
Image: "scroll_l2geth",
ExposedPorts: []string{"8546/tcp", "8545/tcp"},
WaitingFor: wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
}
genericContainerReq := testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
}
container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
if err != nil {
log.Printf("failed to start scroll_l2geth container: %s", err)
return err
}
t.l2GethContainer, _ = container.(*testcontainers.DockerContainer)
return nil
}
// GetDBEndPoint returns the endpoint of the running postgres container
func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
if t.postgresContainer == nil || !t.postgresContainer.IsRunning() {
return "", fmt.Errorf("postgres is not running")
}
return t.postgresContainer.ConnectionString(context.Background(), "sslmode=disable")
}
// GetL1GethEndPoint returns the endpoint of the running L1Geth container
func (t *TestcontainerApps) GetL1GethEndPoint() (string, error) {
if t.l1GethContainer == nil || !t.l1GethContainer.IsRunning() {
return "", fmt.Errorf("l1 geth is not running")
}
endpoint, err := t.l1GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
if err != nil {
return "", err
}
return endpoint, nil
}
// GetL2GethEndPoint returns the endpoint of the running L2Geth container
func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
if t.l2GethContainer == nil || !t.l2GethContainer.IsRunning() {
return "", fmt.Errorf("l2 geth is not running")
}
endpoint, err := t.l2GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
if err != nil {
return "", err
}
return endpoint, nil
}
// GetGormDBClient returns a gorm.DB by connecting to the running postgres container
func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
endpoint, err := t.GetDBEndPoint()
if err != nil {
return nil, err
}
dbCfg := &database.Config{
DSN: endpoint,
DriverName: "postgres",
MaxOpenNum: 200,
MaxIdleNum: 20,
}
return database.InitDB(dbCfg)
}
// GetL1GethClient returns an ethclient by dialing the running L1Geth
func (t *TestcontainerApps) GetL1GethClient() (*ethclient.Client, error) {
endpoint, err := t.GetL1GethEndPoint()
if err != nil {
return nil, err
}
client, err := ethclient.Dial(endpoint)
if err != nil {
return nil, err
}
return client, nil
}
// GetL2GethClient returns an ethclient by dialing the running L2Geth
func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
endpoint, err := t.GetL2GethEndPoint()
if err != nil {
return nil, err
}
client, err := ethclient.Dial(endpoint)
if err != nil {
return nil, err
}
return client, nil
}
// Free stops all running containers
func (t *TestcontainerApps) Free() {
ctx := context.Background()
if t.postgresContainer != nil && t.postgresContainer.IsRunning() {
if err := t.postgresContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop postgres container: %s", err)
}
}
if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
if err := t.l1GethContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop scroll_l1geth container: %s", err)
}
}
if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
if err := t.l2GethContainer.Terminate(ctx); err != nil {
log.Printf("failed to stop scroll_l2geth container: %s", err)
}
}
}

View File

@@ -0,0 +1,59 @@
package testcontainers
import (
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
)
// TestNewTestcontainerApps tests NewTestcontainerApps
func TestNewTestcontainerApps(t *testing.T) {
var (
err error
endpoint string
gormDBclient *gorm.DB
ethclient *ethclient.Client
)
// test start testcontainers
testApps := NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
endpoint, err = testApps.GetDBEndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
gormDBclient, err = testApps.GetGormDBClient()
assert.NoError(t, err)
assert.NotNil(t, gormDBclient)
assert.NoError(t, testApps.StartL1GethContainer())
endpoint, err = testApps.GetL1GethEndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetL1GethClient()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
assert.NoError(t, testApps.StartL2GethContainer())
endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)
assert.NotEmpty(t, endpoint)
ethclient, err = testApps.GetL2GethClient()
assert.NoError(t, err)
assert.NotNil(t, ethclient)
// test free testcontainers
testApps.Free()
endpoint, err = testApps.GetDBEndPoint()
assert.EqualError(t, err, "postgres is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetL1GethEndPoint()
assert.EqualError(t, err, "l1 geth is not running")
assert.Empty(t, endpoint)
endpoint, err = testApps.GetL2GethEndPoint()
assert.EqualError(t, err, "l2 geth is not running")
assert.Empty(t, endpoint)
}

View File

@@ -227,18 +227,11 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
}
// blob payload
blob, z, err := constructBlobPayload(batch.Chunks)
blob, blobVersionedHash, z, err := constructBlobPayload(batch.Chunks)
if err != nil {
return nil, err
}
// blob versioned hash
c, err := kzg4844.BlobToCommitment(*blob)
if err != nil {
return nil, fmt.Errorf("failed to create blob commitment")
}
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
daBatch := DABatch{
Version: CodecV1Version,
BatchIndex: batch.Index,
@@ -281,7 +274,7 @@ func computeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore u
}
// constructBlobPayload constructs the 4844 blob payload.
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) {
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
metadataLength := 2 + MaxNumChunks*4
@@ -289,8 +282,8 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
blobBytes := make([]byte, metadataLength)
// challenge digest preimage
// 1 hash for metadata and 1 for each chunk
challengePreimage := make([]byte, (1+MaxNumChunks)*32)
// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
challengePreimage := make([]byte, (1+MaxNumChunks+1)*32)
// the chunk data hash used for calculating the challenge preimage
var chunkDataHash common.Hash
@@ -309,7 +302,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
// encode L2 txs into blob payload
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
if err != nil {
return nil, nil, err
return nil, common.Hash{}, nil, err
}
blobBytes = append(blobBytes, rlpTxData...)
}
@@ -341,9 +334,19 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
// convert raw data to BLSFieldElements
blob, err := makeBlobCanonical(blobBytes)
if err != nil {
return nil, nil, err
return nil, common.Hash{}, nil, err
}
// compute blob versioned hash
c, err := kzg4844.BlobToCommitment(*blob)
if err != nil {
return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
}
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
// challenge: append blob versioned hash
copy(challengePreimage[(1+MaxNumChunks)*32:], blobVersionedHash[:])
// compute z = challenge_digest % BLS_MODULUS
challengeDigest := crypto.Keccak256Hash(challengePreimage)
pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
@@ -354,7 +357,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
start := 32 - len(pointBytes)
copy(z[start:], pointBytes)
return blob, &z, nil
return blob, blobVersionedHash, &z, nil
}
// makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
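
Restating the change in constructBlobPayload: the challenge preimage gains one trailing 32-byte slot for the blob versioned hash, and the evaluation point z is the keccak digest of the whole preimage reduced into the BLS scalar field. A condensed sketch using the names from the diff:

// Challenge preimage layout, (1 + MaxNumChunks + 1) * 32 bytes in total:
//   bytes [0, 32):                     keccak of the blob metadata
//   bytes [32, (1+MaxNumChunks)*32):   one data hash per chunk slot
//   bytes [(1+MaxNumChunks)*32, end):  the blob versioned hash (new in this PR)
challengeDigest := crypto.Keccak256Hash(challengePreimage)
pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
// z is the 32-byte big-endian encoding of pointBigInt; the tests below pair it
// with y from kzg4844.ComputeProof(*blob, *z) to check the expected proof values.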

View File

@@ -10,7 +10,9 @@ import (
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/stretchr/testify/assert"
)
@@ -477,55 +479,125 @@ func TestCodecV1BatchChallenge(t *testing.T) {
originalBatch := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
batch, err := NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b36", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef", hex.EncodeToString(batch.z[:]))
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c99294", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e58", hex.EncodeToString(batch.z[:]))
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8", hex.EncodeToString(batch.z[:]))
trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk5}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk6}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk7}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
// 15 chunks
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b", hex.EncodeToString(batch.z[:]))
chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk8, chunk9}}
batch, err = NewDABatch(originalBatch)
assert.NoError(t, err)
assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c43", hex.EncodeToString(batch.z[:]))
assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab", hex.EncodeToString(batch.z[:]))
}
func repeat(element byte, count int) string {
result := make([]byte, 0, count)
for i := 0; i < count; i++ {
result = append(result, element)
}
return "0x" + common.Bytes2Hex(result)
}
func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
nRowsData := 126914
for _, tc := range []struct {
chunks [][]string
expectedz string
expectedy string
}{
// single empty chunk
{chunks: [][]string{{}}, expectedz: "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", expectedy: "304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd08"},
// single non-empty chunk
{chunks: [][]string{{"0x010203"}}, expectedz: "1c1d4bd5153f877d799853080aba243f2c186dd6d6064eaefacfe715c92b6354", expectedy: "24e80ed99526b0d15ba46f7ec682f517576ddae68d5131e5d351f8bae06ea7d3"},
// multiple empty chunks
{chunks: [][]string{{}, {}}, expectedz: "152c9ccfcc2884f9891f7adce2de110cf9f85bfd0e21f0933ae0636390a84d41", expectedy: "5f6f532676e25b49e2eae77513fbeca173a300b434c0a5e24fa554b68e27d582"},
// multiple non-empty chunks
{chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "62100f5381179ea7db7aa8fdedb0f7fc7b82730b75432d50ab41f80aeebe45a3", expectedy: "5b1f6e7a54907ddc06871853cf1f5d53bf2de0df7b61d0df84bc2c3fb80320cd"},
// empty chunk followed by non-empty chunk
{chunks: [][]string{{}, {"0x010203"}}, expectedz: "2d94d241c4a2a8d8f02845ca40cfba344f3b42384af2045a75c82e725a184232", expectedy: "302416c177e9e7fe40c3bc4315066c117e27d246b0a33ef68cdda6dd333c485c"},
// non-empty chunk followed by empty chunk
{chunks: [][]string{{"0x070809"}, {}}, expectedz: "7227567e3b1dbacb48a32bb85e4e99f73e4bd5620ea8cd4f5ac00a364c86af9c", expectedy: "2eb3dfd28362f35f562f779e749a555d2f1f87ddc716e95f04133d25189a391c"},
// max number of chunks all empty
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "1128ac3e22ced6af85be4335e0d03a266946a7cade8047e7fc59d6c8be642321", expectedy: "2d9b16422ce17f328fd00c99349768f0cb0c8648115eb3bd9b7864617ba88059"},
// max number of chunks all non-empty
{chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1a4025a3d74e70b511007dd55a2e252478c48054c6383285e8a176f33d99853b", expectedy: "12071ac2571c11220432a27b8be549392892e9baf4c654748ca206def3843940"},
// single chunk blob full
{chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "72714cc4a0ca75cee2d543b1f958e3d3dd59ac7df0d9d5617d8117b65295a5f2", expectedy: "4ebb690362bcbc42321309c210c99f2ebdb53b3fcf7cf3b17b78f6cfd1203ed3"},
// multiple chunks blob full
{chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "70eb5b4db503e59413238eef451871c5d12f2bb96c8b96ceca012f4ca0114727", expectedy: "568d0aaf280ec83f9c81ed2d80ecbdf199bd72dafb8a350007d37ea82997e455"},
// max number of chunks only last one non-empty not full blob
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "03db68ae16ee88489d52db19e6111b25630c5f23ad7cd14530aacf0cd231d476", expectedy: "24527d0b0e93b3dec0060c7b128975a8088b3104d3a297dc807ab43862a77a1a"},
// max number of chunks only last one non-empty full blob
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "677670193f73db499cede572bcb55677f0d2f13d690f9a820bd00bf584c3c241", expectedy: "1d85677f172dbdf4ad3094a17deeb1df4d7d2b7f35ecea44aebffa757811a268"},
// max number of chunks but last is empty
{chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "22935042dfe7df771b02c1f5cababfe508869e8f6339dabe25a8a32e37728bb0", expectedy: "48ca66fb5a094401728c3a6a517ffbd72c4d4d9a8c907e2d2f1320812f4d856f"},
} {
chunks := []*encoding.Chunk{}
for _, c := range tc.chunks {
block := &encoding.Block{Transactions: []*types.TransactionData{}}
for _, data := range c {
tx := &types.TransactionData{Type: 0xff, Data: data}
block.Transactions = append(block.Transactions, tx)
}
chunk := &encoding.Chunk{Blocks: []*encoding.Block{block}}
chunks = append(chunks, chunk)
}
b, _, z, err := constructBlobPayload(chunks)
assert.NoError(t, err)
actualZ := hex.EncodeToString(z[:])
assert.Equal(t, tc.expectedz, actualZ)
_, y, err := kzg4844.ComputeProof(*b, *z)
assert.NoError(t, err)
actualY := hex.EncodeToString(y[:])
assert.Equal(t, tc.expectedy, actualY)
}
}
func TestCodecV1BatchBlobDataProof(t *testing.T) {
@@ -536,7 +608,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err := batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b363d27683f7aab53cf071e2c8c8f3abfe750d206c048489450d120679cdc823f7db44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607a30dad96431f70551dd950c1426131d73ccea6d050d38dea123aad90aa8c0b734c98e8e04bd8ea8f19b415f2d85156d8", hex.EncodeToString(verifyData))
assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef423dc493f1dd7c9fbecdffa021ca4649b13e8d72231487034ec6b27e155ecfd7b44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607b38542ec811c92d86ff6f3319de06ee60c42655278ccf874f3615f450de730895276828b73db03c553b0bc7e5474a5e0", hex.EncodeToString(verifyData))
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
@@ -545,7 +617,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c992946dc7e51a42a31f429bc1f321dcf020b9a661225259522dba186fcfe5dc012191b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e86a0b3c76e33edb24eb07faeaa5d3f2b15a55df6ab99abf828b5803f5681dc634602eb7469ee0556563b2eccebf16ec822", hex.EncodeToString(verifyData))
assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e5821975f318babe50be728f9b52754d5ce2caa2ba82ba35b5888af1c5f28d23206b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e868e0c6fe7bd39baa5ee6339cd334a18af7c680d24e825262499e83b31633b13a9ee89813fae8441630c82bc9dce3f1e07", hex.EncodeToString(verifyData))
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
@@ -554,7 +626,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a24164e6ea8b7946ce5e40d2baa4f6aa0d030076f6074295288133c00e75dafa2afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d676248f5ca4a9f0d9b7fa48f5f649dc84e928161fd99ad1bd9a9879b05d29c5f718bfb3b0a696a5f3ed50b5b8c6a9d530b3ee", hex.EncodeToString(verifyData))
assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8088a01e54e3565d2e91ce6afbadf479330847d9106737875303ce17f17c48722afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d67624aee03a0f7cdb7807bc7e0b9fb20bc299af2a35e38cda816708b40f2f18db491e14a0f5d9cfe2f4c12e4ca1a219484f17", hex.EncodeToString(verifyData))
trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -563,7 +635,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
@@ -572,7 +644,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
@@ -581,7 +653,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
// 15 chunks
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
@@ -589,7 +661,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2613ca15d051a539e3b239027f9bdbd03bd3c66c98afafb674e2a7441912cbe099743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc93291fbc65cfa558e4df12bcde442483d31072000c56f94fe012285bc5832eaee5fe1d47f1e8655539c4500f66207d8edc6", hex.EncodeToString(verifyData))
assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b54d28f1479467d8b97fb99f5257d3e5d63a81cb2d60e3564fe6ec6066a311c119743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc932a0b603cc94be2007d4b3b17af06b4fb30caf0e574d5abcfc5654079e65154679afad75844396082a7200a4e82462aeed", hex.EncodeToString(verifyData))
chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -598,7 +670,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
assert.NoError(t, err)
verifyData, err = batch.BlobDataProof()
assert.NoError(t, err)
assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c4330b3863672052b3d6c6552d121b0b13f97659f49bbfb6d7fed6e4b7076e4a43383bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fdae9cb71d402cfe8bc4d659f228c41f0b9d195c5074278a2346204cfaa336f5de2244a3d53e0effa2f49c81924720e84e", hex.EncodeToString(verifyData))
assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab35b73ddb4a78fc4a8540f1d8259512c46e606a701e7ef7742e38cc4562ef53b983bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fda28e5610ca6b185d6ac30b53bd83d6366fccb1956daafa90ff6b504a966b119ebb45cb3f7085b7c1d622ee1ad27fcff9", hex.EncodeToString(verifyData))
}
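A reading aid for the fixtures above: in every old/new pair, the 48 bytes starting at offset 64 are identical, which is consistent with that segment being the KZG commitment (the blob contents, and hence the commitment, were unchanged by this diff). That suggests the layout z (32 bytes) || y (32) || commitment (48) || proof (48). A hedged sketch under that assumption; splitBlobDataProof is a hypothetical helper, not part of this codec:
func splitBlobDataProof(p []byte) (z, y [32]byte, commitment, proof [48]byte, err error) {
	if len(p) != 160 {
		err = fmt.Errorf("unexpected blob data proof length: %d", len(p))
		return
	}
	copy(z[:], p[0:32])            // challenge point
	copy(y[:], p[32:64])           // claimed evaluation at z
	copy(commitment[:], p[64:112]) // KZG commitment to the blob
	copy(proof[:], p[112:160])     // KZG opening proof
	return
}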
func TestCodecV1BatchSkipBitmap(t *testing.T) {

View File

@@ -6,6 +6,7 @@ import (
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
)
// CodecVersion defines the version of encoder and decoder.
@@ -17,8 +18,18 @@ const (
// CodecV1 represents the version 1 of the encoder and decoder.
CodecV1
// txTypeTest is a special transaction type used in unit tests.
txTypeTest = 0xff
)
func init() {
// make sure txTypeTest will not interfere with other transaction types
if txTypeTest == types.LegacyTxType || txTypeTest == types.AccessListTxType || txTypeTest == types.DynamicFeeTxType || txTypeTest == types.BlobTxType || txTypeTest == types.L1MessageTxType {
log.Crit("txTypeTest is overlapping with existing transaction types")
}
}
// Block represents an L2 block.
type Block struct {
Header *types.Header
@@ -134,6 +145,10 @@ func ConvertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
S: txData.S.ToInt(),
})
case txTypeTest:
// in the tests, we simply use `data` as the RLP-encoded transaction
return data, nil
case types.L1MessageTxType: // L1MessageTxType is not supported
default:
return nil, fmt.Errorf("unsupported tx type: %d", txData.Type)

View File

@@ -0,0 +1,91 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
// HardForkName the hard fork name
HardForkName string `json:"hard_fork_name"`
}
// SignWithKey signs the auth message with the given private key and stores the signature.
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the signature of the auth message.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey returns the public key recovered from the signature.
func (a *AuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
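A hedged usage sketch for the types above, assuming it sits in the same message package and additionally imports errors; all identity values are hypothetical:
func exampleAuthRoundTrip() error {
	priv, err := crypto.GenerateKey()
	if err != nil {
		return err
	}
	msg := AuthMsg{
		Identity: &Identity{
			ProverName:    "prover-0",          // hypothetical
			ProverVersion: "v4.3.92",
			Challenge:     "example-challenge", // hypothetical
			HardForkName:  "bernoulli",
		},
	}
	if err := msg.SignWithKey(priv); err != nil {
		return err
	}
	ok, err := msg.Verify()
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("auth signature did not verify")
	}
	// pk is the hex-encoded compressed public key recovered from the signature.
	pk, err := msg.PublicKey()
	_ = pk
	return err
}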

View File

@@ -0,0 +1,89 @@
package message
import (
"crypto/ecdsa"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type LegacyAuthMsg struct {
// Message fields
Identity *LegacyIdentity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// LegacyIdentity contains all the fields to be signed by the prover.
type LegacyIdentity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
// SignWithKey signs the auth message with the given private key and stores the signature.
func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the signature of the auth message.
func (a *LegacyAuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey returns the public key recovered from the signature.
func (a *LegacyAuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *LegacyIdentity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
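Why both message types must coexist: adding HardForkName changes the RLP encoding of the identity, so the signed digest differs, and a signature made over a LegacyIdentity would recover a different public key if hashed as an Identity. A minimal sketch of the divergence (same package assumed, bytes imported; field values hypothetical):
func identityHashesDiverge() (bool, error) {
	newID := &Identity{ProverName: "p", ProverVersion: "v", Challenge: "c", HardForkName: "bernoulli"}
	legacyID := &LegacyIdentity{ProverName: "p", ProverVersion: "v", Challenge: "c"}
	h1, err := newID.Hash()
	if err != nil {
		return false, err
	}
	h2, err := legacyID.Hash()
	if err != nil {
		return false, err
	}
	// Different RLP payloads yield different Keccak digests, which is why the
	// coordinator recovers the key via the legacy path when no hard fork name
	// is supplied (see the auth controller change below).
	return !bytes.Equal(h1, h2), nil
}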

View File

@@ -58,26 +58,6 @@ const (
ProofTypeBatch
)
// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
// Message fields
Identity *Identity `json:"message"`
// Prover signature
Signature string `json:"signature"`
}
// Identity contains all the fields to be signed by the prover.
type Identity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
// GenerateToken generates token
func GenerateToken() (string, error) {
b := make([]byte, 16)
@@ -87,65 +67,6 @@ func GenerateToken() (string, error) {
return hex.EncodeToString(b), nil
}
// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Identity.Hash()
if err != nil {
return err
}
// Sign register message
sig, err := crypto.Sign(hash, priv)
if err != nil {
return err
}
a.Signature = hexutil.Encode(sig)
return nil
}
// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
hash, err := a.Identity.Hash()
if err != nil {
return false, err
}
sig := common.FromHex(a.Signature)
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return false, err
}
return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}
// PublicKey return public key from signature
func (a *AuthMsg) PublicKey() (string, error) {
hash, err := a.Identity.Hash()
if err != nil {
return "", err
}
sig := common.FromHex(a.Signature)
// recover public key
pk, err := crypto.SigToPub(hash, sig)
if err != nil {
return "", err
}
return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
*ProofDetail `json:"zkProof"`
@@ -259,6 +180,7 @@ type ChunkInfo struct {
WithdrawRoot common.Hash `json:"withdraw_root"`
DataHash common.Hash `json:"data_hash"`
IsPadding bool `json:"is_padding"`
TxBytes []byte `json:"tx_bytes"`
}
// ChunkProof includes the proof info that are required for chunk verification and rollup.

View File

@@ -54,7 +54,7 @@ func TestIdentityHash(t *testing.T) {
hash, err := identity.Hash()
assert.NoError(t, err)
expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721"
expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.3.79"
var tag = "v4.3.92"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -5,7 +5,7 @@
"license": "MIT",
"scripts": {
"test:hardhat": "npx hardhat test",
"test:forge": "forge test -vvv",
"test:forge": "forge test -vvv --evm-version cancun",
"test": "yarn test:hardhat && yarn test:forge",
"solhint": "./node_modules/.bin/solhint -f table 'src/**/*.sol'",
"lint:sol": "./node_modules/.bin/prettier --write 'src/**/*.sol'",

View File

@@ -2,6 +2,8 @@
pragma solidity ^0.8.24;
/// @title IScrollChain
/// @notice The interface for ScrollChain.
interface IScrollChain {
/**********
* Events *
@@ -43,23 +45,23 @@ interface IScrollChain {
* Public View Functions *
*************************/
/// @notice The latest finalized batch index.
/// @return The latest finalized batch index.
function lastFinalizedBatchIndex() external view returns (uint256);
/// @notice Return the batch hash of a committed batch.
/// @param batchIndex The index of the batch.
/// @return The batch hash of a committed batch.
function committedBatches(uint256 batchIndex) external view returns (bytes32);
/// @notice Return the state root of a committed batch.
/// @param batchIndex The index of the batch.
/// @return The state root of a committed batch.
function finalizedStateRoots(uint256 batchIndex) external view returns (bytes32);
/// @notice Return the message root of a committed batch.
/// @param batchIndex The index of the batch.
/// @return The message root of a committed batch.
function withdrawRoots(uint256 batchIndex) external view returns (bytes32);
/// @notice Return whether the batch is finalized by batch index.
/// @param batchIndex The index of the batch.
/// @return Whether the batch is finalized by batch index.
function isBatchFinalized(uint256 batchIndex) external view returns (bool);
/*****************************

View File

@@ -8,6 +8,8 @@ import {IScrollChain} from "./IScrollChain.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";
/// @title MultipleVersionRollupVerifier
/// @notice Verifies aggregate zk proofs using the appropriate verifier.
contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/**********
* Events *
@@ -37,7 +39,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
*************/
/// @notice The address of ScrollChain contract.
address immutable scrollChain;
address public immutable scrollChain;
/***********
* Structs *
@@ -58,7 +60,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/// The verifiers are sorted by batchIndex in increasing order.
mapping(uint256 => Verifier[]) public legacyVerifiers;
/// @notice Mapping from verifier version to the lastest used zkevm verifier.
/// @notice Mapping from verifier version to the latest used zkevm verifier.
mapping(uint256 => Verifier) public latestVerifier;
/***************
@@ -86,6 +88,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
*************************/
/// @notice Return the number of legacy verifiers.
/// @param _version The version of legacy verifiers.
/// @return The number of legacy verifiers.
function legacyVerifiersLength(uint256 _version) external view returns (uint256) {
return legacyVerifiers[_version].length;
}
@@ -93,6 +97,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
/// @notice Compute the verifier that should be used for a specific batch.
/// @param _version The version of verifier to query.
/// @param _batchIndex The batch index to query.
/// @return The address of verifier.
function getVerifier(uint256 _version, uint256 _batchIndex) public view returns (address) {
// Normally, we will use the latest verifier.
Verifier memory _verifier = latestVerifier[_version];
@@ -144,6 +149,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
************************/
/// @notice Update the address of zkevm verifier.
/// @param _version The version of the verifier.
/// @param _startBatchIndex The start batch index when the verifier will be used.
/// @param _verifier The address of new verifier.
function updateVerifier(

View File

@@ -115,11 +115,11 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
*************/
/// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
address private constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
/// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
/// point evaluation precompile
uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
/// @notice The chain id of the corresponding layer 2 chain.
uint64 public immutable layer2ChainId;
@@ -236,6 +236,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
*****************************/
/// @notice Import layer 2 genesis block
/// @param _batchHeader The header of the genesis batch.
/// @param _stateRoot The state root of the genesis block.
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
// check genesis batch header length
if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();
@@ -475,7 +477,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
_postStateRoot,
_withdrawRoot,
_dataHash,
_blobDataProof[0:64]
_blobDataProof[0:64],
_blobVersionedHash
)
);
@@ -877,7 +880,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr);
if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
unchecked {
_totalTransactionsInChunk += dataPtr - startPtr; // number of non-skipped l1 messages
_totalTransactionsInChunk += (dataPtr - startPtr) / 32; // number of non-skipped l1 messages
_totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs
_totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
_totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
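The division by 32 above is the substance of this fix: dataPtr advances by one 32-byte message hash for each non-skipped L1 message, so the raw byte span over-counts messages by a factor of 32. A worked sketch of the corrected accounting, in Go with hypothetical pointer values:
startPtr := uint64(0x100)               // hypothetical start of the hash region
dataPtr := startPtr + 3*32              // three non-skipped L1 message hashes appended
nonSkipped := (dataPtr - startPtr) / 32 // = 3; the raw span (96) was the old, wrong count
_ = nonSkipped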

View File

@@ -2,6 +2,8 @@
pragma solidity ^0.8.24;
/// @title IRollupVerifier
/// @notice The interface for rollup verifier.
interface IRollupVerifier {
/// @notice Verify aggregate zk proof.
/// @param batchIndex The batch index to verify.

View File

@@ -199,7 +199,7 @@ library PatriciaMerkleTrieVerifier {
}
// decodes all RLP encoded data and stores their DATA items
// [length - 128 bits | calldata offset - 128 bits] in a continous memory region.
// [length - 128 bits | calldata offset - 128 bits] in a continuous memory region.
// Expects that the RLP starts with a list that defines the length
// of the whole RLP region.
function decodeFlat(_ptr) -> ptr, memStart, nItems, hash {
@@ -505,7 +505,7 @@ library PatriciaMerkleTrieVerifier {
}
// the one and only boundary check
// in case an attacker crafted a malicous payload
// in case an attacker crafted a malicious payload
// and succeeds in the prior verification steps
// then this should catch any bogus accesses
if iszero(eq(ptr, add(proof.offset, proof.length))) {

View File

@@ -83,6 +83,8 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
}
function testTransferUSDCRoles(address owner) external {
hevm.assume(owner != address(0));
// non-whitelisted caller call, should revert
hevm.expectRevert("only circle caller");
gateway.transferUSDCRoles(owner);

View File

@@ -12,11 +12,11 @@ import (
"github.com/scroll-tech/go-ethereum/params"
coordinatorConfig "scroll-tech/coordinator/internal/config"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/utils"
coordinatorConfig "scroll-tech/coordinator/internal/config"
)
var (
@@ -28,7 +28,7 @@ type CoordinatorApp struct {
Config *coordinatorConfig.Config
ChainConfig *params.ChainConfig
base *docker.App
testApps *testcontainers.TestcontainerApps
configOriginFile string
chainConfigOriginFile string
@@ -37,17 +37,17 @@ type CoordinatorApp struct {
HTTPPort int64
args []string
docker.AppAPI
*cmd.Cmd
}
// NewCoordinatorApp returns a new coordinatorApp manager.
func NewCoordinatorApp(base *docker.App, configFile string, chainConfigFile string) *CoordinatorApp {
coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", base.Timestamp)
genesisFile := fmt.Sprintf("/tmp/%d_genesis.json", base.Timestamp)
func NewCoordinatorApp(testApps *testcontainers.TestcontainerApps, configFile string, chainConfigFile string) *CoordinatorApp {
coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", testApps.Timestamp)
genesisFile := fmt.Sprintf("/tmp/%d_genesis.json", testApps.Timestamp)
port, _ := rand.Int(rand.Reader, big.NewInt(2000))
httpPort := port.Int64() + httpStartPort
coordinatorApp := &CoordinatorApp{
base: base,
testApps: testApps,
configOriginFile: configFile,
chainConfigOriginFile: chainConfigFile,
coordinatorFile: coordinatorFile,
@@ -63,14 +63,14 @@ func NewCoordinatorApp(base *docker.App, configFile string, chainConfigFile stri
// RunApp runs the coordinator-test child process with the given parameters.
func (c *CoordinatorApp) RunApp(t *testing.T, args ...string) {
c.AppAPI = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
c.Cmd = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
c.Cmd.RunApp(func() bool { return c.Cmd.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
}
// Free stops and releases coordinator-test.
func (c *CoordinatorApp) Free() {
if !utils.IsNil(c.AppAPI) {
c.AppAPI.WaitExit()
if !utils.IsNil(c.Cmd) {
c.Cmd.WaitExit()
}
_ = os.Remove(c.coordinatorFile)
}
@@ -82,7 +82,6 @@ func (c *CoordinatorApp) HTTPEndpoint() string {
// MockConfig creates a new coordinator config.
func (c *CoordinatorApp) MockConfig(store bool) error {
base := c.base
cfg, err := coordinatorConfig.NewConfig(c.configOriginFile)
if err != nil {
return err
@@ -97,7 +96,11 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
MaxVerifierWorkers: 4,
MinProverVersion: "v1.0.0",
}
cfg.DB.DSN = base.DBImg.Endpoint()
endpoint, err := c.testApps.GetDBEndPoint()
if err != nil {
return err
}
cfg.DB.DSN = endpoint
cfg.L2.ChainID = 111
cfg.Auth.ChallengeExpireDurationSec = 1
cfg.Auth.LoginExpireDurationSec = 1

View File

@@ -5,6 +5,7 @@
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,
"verifier": {
"fork_name": "bernoulli",
"mock_mode": true,
"params_path": "",
"assets_path": ""

View File

@@ -50,6 +50,7 @@ type Config struct {
// VerifierConfig load zk verifier config.
type VerifierConfig struct {
ForkName string `json:"fork_name"`
MockMode bool `json:"mock_mode"`
ParamsPath string `json:"params_path"`
AssetsPath string `json:"assets_path"`

View File

@@ -53,25 +53,44 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
return jwt.MapClaims{}
}
// recover the public key
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
},
Signature: v.Signature,
var publicKey string
var err error
if v.Message.HardForkName != "" {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
HardForkName: v.Message.HardForkName,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
} else {
authMsg := message.LegacyAuthMsg{
Identity: &message.LegacyIdentity{
Challenge: v.Message.Challenge,
ProverName: v.Message.ProverName,
ProverVersion: v.Message.ProverVersion,
},
Signature: v.Signature,
}
publicKey, err = authMsg.PublicKey()
}
publicKey, err := authMsg.PublicKey()
if err != nil {
return jwt.MapClaims{}
}
if v.Message.HardForkName == "" {
v.Message.HardForkName = "shanghai"
}
return jwt.MapClaims{
types.PublicKey: publicKey,
types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion,
types.HardForkName: v.Message.HardForkName,
}
}
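For orientation, hedged examples of the two login payload shapes this branch distinguishes, derived from the json tags on Identity and LegacyIdentity; all values are hypothetical:
// New-style login: hard_fork_name present, key recovered via message.Identity.
const newLogin = `{"message":{"prover_name":"prover-0","prover_version":"v4.3.92","challenge":"abc","hard_fork_name":"bernoulli"},"signature":"0x..."}`

// Legacy login: no hard_fork_name; the key is recovered via
// message.LegacyIdentity and HardForkName is defaulted to "shanghai".
const legacyLogin = `{"message":{"prover_name":"prover-0","prover_version":"v4.3.79","challenge":"abc"},"signature":"0x..."}`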
@@ -89,5 +108,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
if proverVersion, ok := claims[types.ProverVersion]; ok {
c.Set(types.ProverVersion, proverVersion)
}
if hardForkName, ok := claims[types.HardForkName]; ok {
c.Set(types.HardForkName, hardForkName)
}
return nil
}

View File

@@ -2,6 +2,7 @@ package api
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -25,6 +26,8 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
panic("proof receiver new verifier failure")
}
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)
Auth = NewAuthController(db)
GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)

View File

@@ -6,6 +6,8 @@ import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
@@ -21,15 +23,21 @@ import (
// GetTaskController is the get prover task API controller
type GetTaskController struct {
proverTasks map[message.ProofType]provertask.ProverTask
getTaskAccessCounter *prometheus.CounterVec
}
// NewGetTaskController creates a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVK, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVK, reg)
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg)
ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
getTaskAccessCounter: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_get_task_access_count",
Help: "Multi dimensions get task counter.",
}, []string{coordinatorType.LabelProverName, coordinatorType.LabelProverPublicKey, coordinatorType.LabelProverVersion}),
}
ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
@@ -38,6 +46,28 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
return ptc
}
func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return fmt.Errorf("get public key from context failed")
}
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return fmt.Errorf("get prover name from context failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return fmt.Errorf("get prover version from context failed")
}
ptc.getTaskAccessCounter.With(prometheus.Labels{
coordinatorType.LabelProverPublicKey: publicKey.(string),
coordinatorType.LabelProverName: proverName.(string),
coordinatorType.LabelProverVersion: proverVersion.(string),
}).Inc()
return nil
}
// GetTasks get assigned chunk/batch task
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
var getTaskParameter coordinatorType.GetTaskParameter
@@ -55,6 +85,10 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
return
}
if err := ptc.incGetTaskAccessCounter(ctx); err != nil {
log.Warn("get_task access counter inc failed", "error", err.Error())
}
result, err := proverTask.Assign(ctx, &getTaskParameter)
if err != nil {
nerr := fmt.Errorf("return prover task err:%w", err)

View File

@@ -31,16 +31,17 @@ type BatchProverTask struct {
batchAttemptsExceedTotal prometheus.Counter
batchTaskGetTaskTotal *prometheus.CounterVec
batchTaskGetTaskProver *prometheus.CounterVec
}
// NewBatchProverTask creates a new batch prover task
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
vkMap: vkMap,
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -58,6 +59,7 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
Name: "coordinator_batch_get_task_total",
Help: "Total number of batch get task.",
}, []string{"fork_name"}),
batchTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "batch"),
}
return bp
}
@@ -69,9 +71,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
hardForkNumber, err := bp.getHardForkNumberByName(getTaskParameter.HardForkName)
hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
return nil, err
}
@@ -83,7 +85,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", getTaskParameter.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
@@ -93,8 +95,8 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, toBlockNum)
if err != nil {
log.Error("failed to get fork end chunk index", "forkName", getTaskParameter.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
@@ -179,7 +181,12 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}
bp.batchTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
bp.batchTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
}).Inc()
return taskMsg, nil
}
@@ -209,6 +216,9 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
DataHash: common.HexToHash(chunk.Hash),
IsPadding: false,
}
if proof.ChunkInfo != nil {
chunkInfo.TxBytes = proof.ChunkInfo.TxBytes
}
chunkInfos = append(chunkInfos, &chunkInfo)
}

View File

@@ -29,15 +29,16 @@ type ChunkProverTask struct {
chunkAttemptsExceedTotal prometheus.Counter
chunkTaskGetTaskTotal *prometheus.CounterVec
chunkTaskGetTaskProver *prometheus.CounterVec
}
// NewChunkProverTask creates a new chunk prover task
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
vkMap: vkMap,
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -55,6 +56,7 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
Name: "coordinator_chunk_get_task_total",
Help: "Total number of chunk get task.",
}, []string{"fork_name"}),
chunkTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "chunk"),
}
return cp
}
@@ -66,9 +68,9 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}
hardForkNumber, err := cp.getHardForkNumberByName(getTaskParameter.HardForkName)
hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
return nil, err
}
@@ -151,7 +153,12 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}
cp.chunkTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
}).Inc()
return taskMsg, nil
}

View File

@@ -2,8 +2,12 @@ package provertask
import (
"fmt"
"sync"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
"scroll-tech/common/version"
@@ -13,11 +17,12 @@ import (
coordinatorType "scroll-tech/coordinator/internal/types"
)
// ErrCoordinatorInternalFailure coordinator internal db failure
var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ErrHardForkName indicates client request with the wrong hard fork name
var ErrHardForkName = fmt.Errorf("wrong hard fork name")
var (
// ErrCoordinatorInternalFailure coordinator internal db failure
ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ErrHardForkName indicates client request with the wrong hard fork name
ErrHardForkName = fmt.Errorf("wrong hard fork name")
)
// ProverTask is the interface of a collector that sends data to the prover
type ProverTask interface {
@@ -28,8 +33,8 @@ type ProverTask interface {
type BaseProverTask struct {
cfg *config.Config
db *gorm.DB
vk string
vkMap map[string]string
nameForkMap map[string]uint64
forkHeights []uint64
@@ -44,6 +49,7 @@ type proverTaskContext struct {
PublicKey string
ProverName string
ProverVersion string
HardForkName string
}
// checkParameter checks whether the prover task parameters are legal
@@ -68,12 +74,24 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
}
ptc.ProverVersion = proverVersion.(string)
hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, fmt.Errorf("get hard fork name from context failed")
}
ptc.HardForkName = hardForkName.(string)
if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
}
vk, vkExist := b.vkMap[ptc.HardForkName]
if !vkExist {
return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
}
// if the prover has a different vk
if getTaskParameter.VK != b.vk {
if getTaskParameter.VK != vk {
log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
// if the prover reports a different prover version
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
@@ -115,3 +133,22 @@ func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error
return hardForkNumber, nil
}
var (
getTaskCounterInitOnce sync.Once
getTaskCounterVec *prometheus.CounterVec = nil
)
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
getTaskCounterInitOnce.Do(func() {
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_get_task_count",
Help: "Multi dimensions get task counter.",
}, []string{"task_type",
coordinatorType.LabelProverName,
coordinatorType.LabelProverPublicKey,
coordinatorType.LabelProverVersion})
})
return getTaskCounterVec.MustCurryWith(prometheus.Labels{"task_type": taskType})
}
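A hedged usage sketch of the curried vec: the chunk and batch tasks share one registered counter, each pre-bound to its task_type label thanks to the sync.Once guard (registry and label values hypothetical):
reg := prometheus.NewRegistry()
chunkCounter := newGetTaskCounterVec(promauto.With(reg), "chunk")
batchCounter := newGetTaskCounterVec(promauto.With(reg), "batch") // same underlying vec; the Once already ran
chunkCounter.With(prometheus.Labels{
	coordinatorType.LabelProverName:      "prover-0",   // hypothetical
	coordinatorType.LabelProverPublicKey: "pubkey-hex", // hypothetical
	coordinatorType.LabelProverVersion:   "v4.3.92",
}).Inc()
_ = batchCounter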

View File

@@ -134,6 +134,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if len(pv) == 0 {
return fmt.Errorf("get ProverVersion from context failed")
}
hardForkName := ctx.GetString(coordinatorType.HardForkName)
var proverTask *orm.ProverTask
var err error
@@ -156,20 +157,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)
if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
return err
}
m.verifierTotal.WithLabelValues(pv).Inc()
var success bool
success := true
var verifyErr error
if proofMsg.Type == message.ProofTypeChunk {
success, verifyErr = m.verifier.VerifyChunkProof(proofMsg.ChunkProof)
} else if proofMsg.Type == message.ProofTypeBatch {
success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof)
// only verify batch proofs; the chunk proof verifier has been disabled since Bernoulli
if proofMsg.Type == message.ProofTypeBatch {
success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName)
}
if verifyErr != nil || !success {
@@ -178,7 +178,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
"prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
if verifyErr != nil {
return ErrValidatorFailureVerifiedFailed
@@ -189,7 +189,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())
log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)
if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
m.proofSubmitFailure.Inc()
@@ -221,7 +221,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
return nil
}
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) {
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) {
defer func() {
if err != nil {
m.validateFailureTotal.Inc()
@@ -240,7 +240,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
"cannot submit valid proof for a prover task twice",
"taskType", proverTask.TaskType, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", proverTask.ProverPublicKey,
"proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName,
)
return ErrValidatorFailureProverTaskCannotSubmitTwice
}
@@ -259,7 +259,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
log.Info("proof generated by prover failed",
"taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
"failureMessage", failureMsg)
"failureMessage", failureMsg, "forkName", forkName)
return ErrValidatorFailureProofMsgStatusNotOk
}
@@ -267,13 +267,13 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName)
return ErrValidatorFailureProofTimeout
}
// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
}
@@ -281,7 +281,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
m.validateFailureProverTaskHaveVerifier.Inc()
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil

View File

@@ -9,8 +9,26 @@ import (
)
// NewVerifier Sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
return &Verifier{}, nil
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
batchVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
chunkVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}
// VerifyChunkProof returns a mock verification result for a ChunkProof.
@@ -22,7 +40,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
}
// VerifyBatchProof returns a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}

View File

@@ -9,7 +9,7 @@ const InvalidTestProof = "this is a invalid proof"
// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
cfg *config.VerifierConfig
BatchVK string
ChunkVK string
cfg *config.VerifierConfig
ChunkVKMap map[string]string
BatchVKMap map[string]string
}

View File

@@ -11,9 +11,11 @@ package verifier
import "C" //nolint:typecheck
import (
"embed"
"encoding/base64"
"encoding/json"
"io"
"io/fs"
"os"
"path"
"unsafe"
@@ -28,7 +30,26 @@ import (
// NewVerifier Sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode {
return &Verifier{cfg: cfg}, nil
batchVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
chunkVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}
paramsPathStr := C.CString(cfg.ParamsPath)
assetsPathStr := C.CString(cfg.AssetsPath)
@@ -40,25 +61,31 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
C.init_batch_verifier(paramsPathStr, assetsPathStr)
C.init_chunk_verifier(paramsPathStr, assetsPathStr)
batchVK, err := readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
v := &Verifier{
cfg: cfg,
ChunkVKMap: make(map[string]string),
BatchVKMap: make(map[string]string),
}
batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
if err != nil {
return nil, err
}
chunkVK, err := readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
if err != nil {
return nil, err
}
v.BatchVKMap[cfg.ForkName] = batchVK
v.ChunkVKMap[cfg.ForkName] = chunkVK
return &Verifier{
cfg: cfg,
BatchVK: batchVK,
ChunkVK: chunkVK,
}, nil
if err := v.loadEmbedVK(); err != nil {
return nil, err
}
return v, nil
}
// VerifyBatchProof verifies a ZkProof by marshaling it and sending it to the Halo2 verifier.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, batch verifier disabled")
if string(proof.Proof) == InvalidTestProof {
@@ -72,13 +99,15 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
return false, err
}
log.Info("Start to verify batch proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()
log.Info("Start to verify batch proof ...")
verified := C.verify_batch_proof(proofStr)
verified := C.verify_batch_proof(proofStr, forkNameStr)
return verified != 0, nil
}
@@ -107,7 +136,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
return verified != 0, nil
}
func readVK(filePat string) (string, error) {
func (v *Verifier) readVK(filePat string) (string, error) {
f, err := os.Open(filePat)
if err != nil {
return "", err
@@ -118,3 +147,26 @@ func readVK(filePat string) (string, error) {
}
return base64.StdEncoding.EncodeToString(byt), nil
}
//go:embed legacy_vk/*
var legacyVKFS embed.FS
func (v *Verifier) loadEmbedVK() error {
batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
if err != nil {
log.Error("load embed batch vk failure", "err", err)
return err
}
chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
if err != nil {
log.Error("load embed chunk vk failure", "err", err)
return err
}
v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
return nil
}
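A note on the empty-string keys at the end: they appear to cover sessions whose context carries no hard fork name (for example tokens issued before the claim existed), since VK lookup is keyed by fork name. A minimal sketch of that lookup, mirroring checkParameter above; batchVKFor is a hypothetical helper:
func (v *Verifier) batchVKFor(forkName string) (string, error) {
	// forkName may be "" for legacy sessions without a hard_fork_name claim.
	vk, ok := v.BatchVKMap[forkName]
	if !ok {
		return "", fmt.Errorf("no batch vk for hard fork %q", forkName)
	}
	return vk, nil
}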

View File

@@ -14,7 +14,6 @@ import (
"scroll-tech/common/types/message"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/verifier"
)
var (
@@ -34,7 +33,7 @@ func TestFFI(t *testing.T) {
AssetsPath: *assetsPath,
}
v, err := verifier.NewVerifier(cfg)
v, err := NewVerifier(cfg)
as.NoError(err)
chunkProof1 := readChunkProof(*chunkProofPath1, as)
@@ -50,7 +49,7 @@ func TestFFI(t *testing.T) {
t.Log("Verified chunk proof 2")
batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof)
batchOk, err := v.VerifyBatchProof(batchProof, "bernoulli")
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")

View File

@@ -24,6 +24,7 @@ type Batch struct {
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -54,6 +55,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// blob
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -73,22 +78,16 @@ func (*Batch) TableName() string {
// GetUnassignedBatch retrieves an unassigned batch within the given chunk index range.
// Candidates are considered in ascending order of batch index.
func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
db = db.Where("start_chunk_index >= ?", startChunkIndex)
db = db.Where("end_chunk_index < ?", endChunkIndex)
var batch Batch
err := db.First(&batch).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
err := db.Raw(sql).Scan(&batch).Error
if err != nil {
return nil, fmt.Errorf("Batch.GetUnassignedBatches error: %w", err)
return nil, fmt.Errorf("Batch.GetUnassignedBatch error: %w", err)
}
if batch.Hash == "" {
return nil, nil
}
return &batch, nil
}
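One behavioral note on this rewrite, which applies equally to the assigned-batch and chunk queries below: Raw(...).Scan does not return gorm.ErrRecordNotFound for an empty result set; it simply leaves the destination zero-valued. That is why an explicit empty-Hash check replaces the old errors.Is test. A hedged side-by-side sketch:
// Chained-API style (removed above): a missing row surfaces as an error.
err := db.Where("proving_status = ?", status).First(&batch).Error
if errors.Is(err, gorm.ErrRecordNotFound) {
	// no batch available
}

// Raw-SQL style (added above): zero rows is not an error, so the zero value
// (an empty Hash) is the "not found" signal.
err = db.Raw(sql).Scan(&batch).Error
if err == nil && batch.Hash == "" {
	// no batch available
}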
@@ -96,22 +95,16 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChun
// GetAssignedBatch retrieves an assigned batch within the given chunk index range.
// Candidates are considered in ascending order of batch index.
func (o *Batch) GetAssignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
db = db.Where("start_chunk_index >= ?", startChunkIndex)
db = db.Where("end_chunk_index < ?", endChunkIndex)
var batch Batch
err := db.First(&batch).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
err := db.Raw(sql).Scan(&batch).Error
if err != nil {
return nil, fmt.Errorf("Batch.GetAssignedBatches error: %w", err)
return nil, fmt.Errorf("Batch.GetAssignedBatch error: %w", err)
}
if batch.Hash == "" {
return nil, nil
}
return &batch, nil
}
@@ -260,6 +253,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
newBatch := Batch{
Index: batch.Index,
Hash: daBatch.Hash().Hex(),
DataHash: daBatch.DataHash.Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: endDAChunkHash.Hex(),
@@ -274,6 +268,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
ActiveAttempts: 0,
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
BlobDataProof: nil, // mock value; this code path is only used in unit tests
BlobSize: 0, // mock value; this code path is only used in unit tests
}
db := o.db

View File

@@ -48,6 +48,10 @@ type Chunk struct {
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// blob
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -71,22 +75,16 @@ func (*Chunk) TableName() string {
// GetUnassignedChunk retrieves an unassigned chunk based on the specified limits.
// The returned chunk is the first matching one in ascending index order.
func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("start_block_number >= ?", fromBlockNum)
db = db.Where("end_block_number < ?", toBlockNum)
var chunk Chunk
err := db.First(&chunk).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
err := db.Raw(sql).Scan(&chunk).Error
if err != nil {
return nil, fmt.Errorf("Chunk.GetUnassignedChunks error: %w", err)
return nil, fmt.Errorf("Chunk.GetUnassignedChunk error: %w", err)
}
if chunk.Hash == "" {
return nil, nil
}
return &chunk, nil
}
@@ -94,22 +92,16 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum
// GetAssignedChunk retrieves an assigned chunk based on the specified limits.
// The returned chunk is the first matching one in ascending index order.
func (o *Chunk) GetAssignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("start_block_number >= ?", fromBlockNum)
db = db.Where("end_block_number < ?", toBlockNum)
var chunk Chunk
err := db.First(&chunk).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
err := db.Raw(sql).Scan(&chunk).Error
if err != nil {
return nil, fmt.Errorf("Chunk.GetAssignedChunks error: %w", err)
return nil, fmt.Errorf("Chunk.GetAssignedChunk error: %w", err)
}
if chunk.Hash == "" {
return nil, nil
}
return &chunk, nil
}
@@ -312,6 +304,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
ProvingStatus: int16(types.ProvingTaskUnassigned),
TotalAttempts: 0,
ActiveAttempts: 0,
CrcMax: 0, // using mock value because this piece of code is only used in unit tests
BlobSize: 0, // using mock value because this piece of code is only used in unit tests
}
db := o.db

View File

@@ -9,41 +9,36 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
)
var (
base *docker.App
testApps *testcontainers.TestcontainerApps
db *gorm.DB
proverTaskOrm *ProverTask
)
func TestMain(m *testing.M) {
t := &testing.T{}
setupEnv(t)
defer tearDownEnv(t)
defer func() {
if testApps != nil {
testApps.Free()
}
tearDownEnv(t)
}()
m.Run()
}
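
Note: the move from docker.App to testcontainers repeats one lifecycle across the test files in this diff. A condensed sketch of that pattern, using only the methods exercised in these hunks (NewTestcontainerApps, StartPostgresContainer, GetGormDBClient, Free):

// testMainSketch shows the shared container lifecycle for these test mains.
func testMainSketch(m *testing.M) {
	apps := testcontainers.NewTestcontainerApps()
	defer apps.Free() // stops every container that was started
	if err := apps.StartPostgresContainer(); err != nil {
		panic(err)
	}
	gormDB, err := apps.GetGormDBClient() // *gorm.DB backed by the container
	if err != nil {
		panic(err)
	}
	_ = gormDB // typically assigned to the package-level db used by the tests
	m.Run()
}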
func setupEnv(t *testing.T) {
base = docker.NewDockerApp()
base.RunDBImage(t)
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
var err error
db, err = database.InitDB(
&database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
},
)
db, err = testApps.GetGormDBClient()
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
@@ -56,10 +51,11 @@ func tearDownEnv(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
sqlDB.Close()
base.Free()
}
func TestProverTaskOrm(t *testing.T) {
setupEnv(t)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

View File

@@ -9,6 +9,8 @@ const (
ProverName = "prover_name"
// ProverVersion the prover version for context
ProverVersion = "prover_version"
// HardForkName the fork name for context
HardForkName = "hard_fork_name"
)
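
Note: these constants double as gin context keys. A minimal sketch (assumed usage, not shown in this diff) of reading the new fork name back out of a request context; forkNameFromContext is a hypothetical helper and assumes the login middleware stored the value via c.Set:

// forkNameFromContext reads the fork name stored under the HardForkName key.
func forkNameFromContext(c *gin.Context) string {
	if v, ok := c.Get(HardForkName); ok {
		if s, ok := v.(string); ok {
			return s
		}
	}
	return ""
}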
// Message the login message struct
@@ -16,6 +18,7 @@ type Message struct {
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
}
// LoginParameter for /login api

View File

@@ -2,7 +2,6 @@ package types
// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
VK string `form:"vk" json:"vk"`

View File

@@ -0,0 +1,10 @@
package types
var (
// LabelProverName label name for prover name; a common label name used in Prometheus metrics, and the same rule applies to the labels below.
LabelProverName = "prover_name"
// LabelProverPublicKey label name for prover public key
LabelProverPublicKey = "prover_pubkey"
// LabelProverVersion label name for prover version
LabelProverVersion = "prover_version"
)
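
Note: a hedged sketch of wiring these label constants into a Prometheus counter; the metric name and default-registry registration below are illustrative, not taken from this diff:

var proverRequestCounter = promauto.NewCounterVec(prometheus.CounterOpts{
	Name: "coordinator_prover_requests_total", // hypothetical metric name
	Help: "Requests from provers, partitioned by prover identity.",
}, []string{LabelProverName, LabelProverPublicKey, LabelProverVersion})

// recordProverRequest bumps the counter for one prover identity.
func recordProverRequest(name, pubkey, version string) {
	proverRequestCounter.WithLabelValues(name, pubkey, version).Inc()
}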

View File

@@ -21,8 +21,7 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/message"
@@ -43,10 +42,9 @@ const (
)
var (
dbCfg *database.Config
conf *config.Config
conf *config.Config
base *docker.App
testApps *testcontainers.TestcontainerApps
db *gorm.DB
l2BlockOrm *orm.L2Block
@@ -70,13 +68,12 @@ var (
)
func TestMain(m *testing.M) {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
log.Root().SetHandler(glogger)
base = docker.NewDockerApp()
defer func() {
if testApps != nil {
testApps.Free()
}
}()
m.Run()
base.Free()
}
func randomURL() string {
@@ -86,7 +83,8 @@ func randomURL() string {
func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, nameForkMap map[string]int64) (*cron.Collector, *http.Server) {
var err error
db, err = database.InitDB(dbCfg)
db, err = testApps.GetGormDBClient()
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
@@ -98,8 +96,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
ChainID: 111,
},
ProverManager: &config.ProverManager{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{MockMode: true},
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{
MockMode: true,
},
BatchCollectionTimeSec: 10,
ChunkCollectionTimeSec: 10,
MaxVerifierWorkers: 10,
@@ -115,6 +115,8 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
var chainConf params.ChainConfig
for forkName, forkNumber := range nameForkMap {
switch forkName {
case "shanghai":
chainConf.ShanghaiBlock = big.NewInt(forkNumber)
case "bernoulli":
chainConf.BernoulliBlock = big.NewInt(forkNumber)
case "london":
@@ -149,20 +151,18 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
}
func setEnv(t *testing.T) {
var err error
version.Version = "v4.1.98"
base = docker.NewDockerApp()
base.RunDBImage(t)
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
log.Root().SetHandler(glogger)
dbCfg = &database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
var err error
db, err = database.InitDB(dbCfg)
db, err = testApps.GetGormDBClient()
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
@@ -199,7 +199,6 @@ func setEnv(t *testing.T) {
func TestApis(t *testing.T) {
// Set up the test environment.
base = docker.NewDockerApp()
setEnv(t)
t.Run("TestHandshake", testHandshake)
@@ -211,11 +210,6 @@ func TestApis(t *testing.T) {
t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
t.Run("TestTimeoutProof", testTimeoutProof)
t.Run("TestHardFork", testHardForkAssignTask)
// Teardown
t.Cleanup(func() {
base.Free()
})
}
func testHandshake(t *testing.T) {
@@ -268,12 +262,12 @@ func testGetTaskBlocked(t *testing.T) {
assert.NoError(t, err)
expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
expectedErr = fmt.Errorf("get empty prover task")
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
@@ -284,12 +278,12 @@ func testGetTaskBlocked(t *testing.T) {
assert.NoError(t, err)
expectedErr = fmt.Errorf("get empty prover task")
code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}
@@ -309,12 +303,12 @@ func testOutdatedProverVersion(t *testing.T) {
assert.True(t, chunkProver.healthCheckSuccess(t))
expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}
@@ -368,7 +362,7 @@ func testHardForkAssignTask(t *testing.T) {
{
name: "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
exceptTaskNumber: 0,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
@@ -458,7 +452,7 @@ func testHardForkAssignTask(t *testing.T) {
{ // hard fork 3, prover1:2 prover2:3 block [2-3]
name: "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -467,7 +461,7 @@ func testHardForkAssignTask(t *testing.T) {
{
name: "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -476,7 +470,7 @@ func testHardForkAssignTask(t *testing.T) {
{ // hard fork 2, prover 2 block [2-3]
name: "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
@@ -544,7 +538,7 @@ func testHardForkAssignTask(t *testing.T) {
continue
}
getTaskNumber++
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success)
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i])
}
assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
})
@@ -587,7 +581,7 @@ func testValidProof(t *testing.T) {
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, proofStatus, types.Success)
provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul")
}
// verify proof status
@@ -653,34 +647,21 @@ func testInvalidProof(t *testing.T) {
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)
// create mock provers.
provers := make([]*mockProver, 2)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
}
proofType := message.ProofTypeBatch
provingStatus := verifiedFailed
expectErrCode := types.ErrCoordinatorHandleZkProofFailure
prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul")
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul")
// verify proof status
var (
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute)
)
var (
chunkProofStatus types.ProvingStatus
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute)
batchProofStatus types.ProvingStatus
chunkActiveAttempts int16
chunkMaxAttempts int16
batchActiveAttempts int16
batchMaxAttempts int16
)
@@ -688,24 +669,17 @@ func testInvalidProof(t *testing.T) {
for {
select {
case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
if batchProofStatus == types.ProvingTaskAssigned {
return
}
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))
batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(batchMaxAttempts))
assert.Equal(t, 0, int(batchActiveAttempts))
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
t.Error("failed to check proof status", "batchProofStatus", batchProofStatus.String())
return
}
}
@@ -745,7 +719,7 @@ func testProofGeneratedFailed(t *testing.T) {
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure)
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul")
}
// verify proof status
@@ -868,14 +842,14 @@ func testTimeoutProof(t *testing.T) {
assert.NotNil(t, proverChunkTask2)
assert.Equal(t, chunkTask2ErrCode, types.Success)
assert.Equal(t, chunkTask2ErrMsg, "")
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success)
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul")
batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version)
proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul")
assert.NotNil(t, proverBatchTask2)
assert.Equal(t, batchTask2ErrCode, types.Success)
assert.Equal(t, batchTask2ErrMsg, "")
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success)
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul")
// verify proof status; it should be verified now because the second prover sent a valid proof
chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)

View File

@@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
}
// connectToCoordinator sets up a client connection to the coordinator and returns the login token.
func (r *mockProver) connectToCoordinator(t *testing.T) string {
func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string {
challengeString := r.challenge(t)
return r.login(t, challengeString)
return r.login(t, challengeString, forkName)
}
func (r *mockProver) challenge(t *testing.T) string {
@@ -76,18 +76,32 @@ func (r *mockProver) challenge(t *testing.T) string {
return loginData.Token
}
func (r *mockProver) login(t *testing.T, challengeString string) string {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
},
func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string {
var body string
if forkName != "" {
authMsg := message.AuthMsg{
Identity: &message.Identity{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
HardForkName: forkName,
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
} else {
authMsg := message.LegacyAuthMsg{
Identity: &message.LegacyIdentity{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
var result ctypes.Response
client := resty.New()
@@ -137,7 +151,7 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {
func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) {
// get task from coordinator
token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)
type response struct {
@@ -151,7 +165,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "hard_fork_name": forkName}).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -160,9 +174,11 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
}
// Testing expected errors returned by coordinator.
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
//
//nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) {
// get task from coordinator
token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)
type response struct {
@@ -185,7 +201,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
return result.ErrCode, result.ErrMsg
}
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) {
proofMsgStatus := message.StatusOk
if proofStatus == generatedFailed {
proofMsgStatus = message.StatusProofError
@@ -228,7 +244,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
submitProof.Proof = string(encodeData)
}
token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)
submitProofData, err := json.Marshal(submitProof)

View File

@@ -20,6 +20,7 @@ require (
github.com/docker/go-connections v0.5.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/jackc/pgx/v5 v5.5.4 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
@@ -31,7 +32,6 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.17.0 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.17.0 // indirect

View File

@@ -58,8 +58,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=

View File

@@ -1,93 +1,89 @@
package migrate
import (
"database/sql"
"testing"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database"
"scroll-tech/common/testcontainers"
)
var (
base *docker.App
pgDB *sqlx.DB
testApps *testcontainers.TestcontainerApps
pgDB *sql.DB
)
func initEnv(t *testing.T) error {
func setupEnv(t *testing.T) {
// Start db container.
base.RunDBImage(t)
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
gormClient, err := testApps.GetGormDBClient()
assert.NoError(t, err)
pgDB, err = gormClient.DB()
assert.NoError(t, err)
}
// Create db orm handler.
factory, err := database.NewOrmFactory(base.DBConfig)
if err != nil {
return err
}
pgDB = factory.GetDB()
return nil
func TestMain(m *testing.M) {
defer func() {
if testApps != nil {
testApps.Free()
}
}()
m.Run()
}
func TestMigrate(t *testing.T) {
base = docker.NewDockerApp()
if err := initEnv(t); err != nil {
t.Fatal(err)
}
setupEnv(t)
t.Run("testCurrent", testCurrent)
t.Run("testStatus", testStatus)
t.Run("testResetDB", testResetDB)
t.Run("testMigrate", testMigrate)
t.Run("testRollback", testRollback)
t.Cleanup(func() {
base.Free()
})
}
func testCurrent(t *testing.T) {
cur, err := Current(pgDB.DB)
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(0), cur)
}
func testStatus(t *testing.T) {
status := Status(pgDB.DB)
status := Status(pgDB)
assert.NoError(t, status)
}
func testResetDB(t *testing.T) {
assert.NoError(t, ResetDB(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, ResetDB(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, int64(16), cur)
assert.Equal(t, int64(17), cur)
}
func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(16), cur)
assert.Equal(t, int64(17), cur)
}
func testRollback(t *testing.T) {
version, err := Current(pgDB.DB)
version, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(16), version)
assert.Equal(t, int64(17), version)
assert.NoError(t, Rollback(pgDB.DB, nil))
assert.NoError(t, Rollback(pgDB, nil))
cur, err := Current(pgDB.DB)
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, version, cur+1)
targetVersion := int64(0)
assert.NoError(t, Rollback(pgDB.DB, &targetVersion))
assert.NoError(t, Rollback(pgDB, &targetVersion))
cur, err = Current(pgDB.DB)
cur, err = Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(0), cur)
}

View File

@@ -0,0 +1,27 @@
-- +goose Up
-- +goose StatementBegin
ALTER TABLE chunk
ADD COLUMN crc_max INTEGER DEFAULT 0,
ADD COLUMN blob_size INTEGER DEFAULT 0;
ALTER TABLE batch
ADD COLUMN data_hash VARCHAR DEFAULT '',
ADD COLUMN blob_data_proof BYTEA DEFAULT NULL,
ADD COLUMN blob_size INTEGER DEFAULT 0;
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
ALTER TABLE IF EXISTS batch
DROP COLUMN data_hash,
DROP COLUMN blob_data_proof,
DROP COLUMN blob_size;
ALTER TABLE IF EXISTS chunk
DROP COLUMN crc_max,
DROP COLUMN blob_size;
-- +goose StatementEnd
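
Note: the new columns carry zero-value defaults, so existing rows remain valid once the migration is applied. A sketch of applying and reverting it with the package's own helpers, mirroring the Migrate/Current/Rollback calls exercised in the migrate test above:

// migrateSketch assumes it lives in package migrate next to the helpers above.
func migrateSketch(db *sql.DB) error {
	if err := Migrate(db); err != nil { // applies all pending goose migrations
		return err
	}
	cur, err := Current(db)
	if err != nil {
		return err
	}
	// cur is 17 once this migration has been applied.
	target := cur - 1
	return Rollback(db, &target) // drops the new chunk/batch columns again
}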

View File

@@ -780,8 +780,6 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNPk=
github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs=
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0=
@@ -1492,8 +1490,6 @@ github.com/scroll-tech/go-ethereum v1.10.14-0.20230306131930-03b4de32b78b/go.mod
github.com/scroll-tech/go-ethereum v1.10.14-0.20230321020420-127af384ed04/go.mod h1:jH8c08L9K8Hieaf0r/ur2P/cpesn4dFhmLm2Mmoi8kI=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230802095950-4b2bbf6225e7/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
@@ -1563,6 +1559,9 @@ github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZy
github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM=
github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -1740,8 +1739,6 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4=
golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
@@ -1771,6 +1768,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1955,6 +1953,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=

View File

@@ -21,14 +21,15 @@ import (
type CoordinatorClient struct {
client *resty.Client
proverName string
priv *ecdsa.PrivateKey
proverName string
hardForkName string
priv *ecdsa.PrivateKey
mu sync.Mutex
}
// NewCoordinatorClient constructs a new CoordinatorClient.
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, hardForkName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
client := resty.New().
SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second).
SetRetryCount(cfg.RetryCount).
@@ -50,9 +51,10 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
"retry wait time (second)", cfg.RetryWaitTimeSec)
return &CoordinatorClient{
client: client,
proverName: proverName,
priv: priv,
client: client,
proverName: proverName,
hardForkName: hardForkName,
priv: priv,
}, nil
}
@@ -83,6 +85,7 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
ProverVersion: version.Version,
ProverName: c.proverName,
Challenge: challengeResult.Data.Token,
HardForkName: c.hardForkName,
},
}
@@ -97,10 +100,12 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
Challenge string `json:"challenge"`
ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
}{
Challenge: authMsg.Identity.Challenge,
ProverName: authMsg.Identity.ProverName,
ProverVersion: authMsg.Identity.ProverVersion,
HardForkName: authMsg.Identity.HardForkName,
},
Signature: authMsg.Signature,
}

View File

@@ -25,6 +25,7 @@ type LoginRequest struct {
Challenge string `json:"challenge"`
ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
} `json:"message"`
Signature string `json:"signature"`
}
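
Note: an illustrative login payload carrying the new field; every value below is made up for the example:

func exampleLoginRequest() LoginRequest {
	var req LoginRequest
	req.Message.Challenge = "token-from-challenge-endpoint" // made-up value
	req.Message.ProverName = "prover-0"
	req.Message.ProverVersion = "v4.1.98"
	req.Message.HardForkName = "bernoulli"
	req.Signature = "0x..." // placeholder; real requests carry a secp256k1 signature
	return req
}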
@@ -41,7 +42,6 @@ type LoginResponse struct {
// GetTaskRequest defines the request structure for GetTask API
type GetTaskRequest struct {
HardForkName string `json:"hard_fork_name"`
TaskType message.ProofType `json:"task_type"`
ProverHeight uint64 `json:"prover_height,omitempty"`
VK string `json:"vk"`

View File

@@ -12,7 +12,7 @@ import (
"scroll-tech/prover/config"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)
@@ -30,7 +30,7 @@ func getIndex() int {
type ProverApp struct {
Config *config.Config
base *docker.App
testApps *testcontainers.TestcontainerApps
originFile string
proverFile string
@@ -39,11 +39,11 @@ type ProverApp struct {
index int
name string
args []string
docker.AppAPI
*cmd.Cmd
}
// NewProverApp returns a new proverApp manager.
func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
func NewProverApp(testApps *testcontainers.TestcontainerApps, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
var proofType message.ProofType
switch mockName {
case utils.ChunkProverApp:
@@ -54,17 +54,17 @@ func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, htt
return nil
}
name := string(mockName)
proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", base.Timestamp, name)
proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", testApps.Timestamp, name)
proverApp := &ProverApp{
base: base,
testApps: testApps,
originFile: file,
proverFile: proverFile,
bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", base.Timestamp, name),
bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", testApps.Timestamp, name),
index: getIndex(),
name: name,
args: []string{"--log.debug", "--config", proverFile},
}
proverApp.AppAPI = cmd.NewCmd(proverApp.name, proverApp.args...)
proverApp.Cmd = cmd.NewCmd(proverApp.name, proverApp.args...)
if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
panic(err)
}
@@ -73,13 +73,13 @@ func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, htt
// RunApp runs the prover-test child process with the configured arguments.
func (r *ProverApp) RunApp(t *testing.T) {
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, "prover start successfully") })
r.Cmd.RunApp(func() bool { return r.Cmd.WaitResult(t, time.Second*40, "prover start successfully") })
}
// Free stops and releases prover-test.
func (r *ProverApp) Free() {
if !utils.IsNil(r.AppAPI) {
r.AppAPI.WaitExit()
if !utils.IsNil(r.Cmd) {
r.Cmd.WaitExit()
}
_ = os.Remove(r.proverFile)
_ = os.Remove(r.Config.KeystorePath)
@@ -93,8 +93,13 @@ func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.Pro
return err
}
cfg.ProverName = fmt.Sprintf("%s_%d", r.name, r.index)
cfg.KeystorePath = fmt.Sprintf("/tmp/%d_%s.json", r.base.Timestamp, cfg.ProverName)
cfg.L2Geth.Endpoint = r.base.L2gethImg.Endpoint()
cfg.KeystorePath = fmt.Sprintf("/tmp/%d_%s.json", r.testApps.Timestamp, cfg.ProverName)
endpoint, err := r.testApps.GetL2GethEndPoint()
if err != nil {
return err
}
cfg.L2Geth.Endpoint = endpoint
cfg.L2Geth.Confirmations = rpc.LatestBlockNumber
// Reuse l1geth's keystore file
cfg.KeystorePassword = "scrolltest"

View File

@@ -7,8 +7,11 @@ import (
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"testing"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -24,8 +27,7 @@ var (
paramsPath = flag.String("params", "/assets/test_params", "params dir")
assetsPath = flag.String("assets", "/assets/test_assets", "assets dir")
proofDumpPath = flag.String("dump", "/assets/proof_data", "the path proofs dump to")
tracePath1 = flag.String("trace1", "/assets/traces/1_transfer.json", "chunk trace 1")
tracePath2 = flag.String("trace2", "/assets/traces/10_transfer.json", "chunk trace 2")
batchDirPath = flag.String("batch-dir", "/assets/traces/batch_24", "batch directory")
batchVkPath = flag.String("batch-vk", "/assets/test_assets/agg_vk.vkey", "batch vk")
chunkVkPath = flag.String("chunk-vk", "/assets/test_assets/chunk_vk.vkey", "chunk vk")
)
@@ -46,23 +48,34 @@ func TestFFI(t *testing.T) {
as.Equal(chunkProverCore.VK, readVk(*chunkVkPath, as))
t.Log("Chunk VK must be available when init")
chunkTrace1 := readChunkTrace(*tracePath1, as)
chunkTrace2 := readChunkTrace(*tracePath2, as)
t.Log("Loaded chunk traces")
// Get the list of subdirectories (chunks)
chunkDirs, err := os.ReadDir(*batchDirPath)
as.NoError(err)
sort.Slice(chunkDirs, func(i, j int) bool {
return chunkDirs[i].Name() < chunkDirs[j].Name()
})
chunkInfo1, err := chunkProverCore.TracesToChunkInfo(chunkTrace1)
as.NoError(err)
chunkInfo2, err := chunkProverCore.TracesToChunkInfo(chunkTrace2)
as.NoError(err)
t.Log("Converted to chunk infos")
chunkInfos := make([]*message.ChunkInfo, 0, len(chunkDirs))
chunkProofs := make([]*message.ChunkProof, 0, len(chunkDirs))
chunkProof1, err := chunkProverCore.ProveChunk("chunk_proof1", chunkTrace1)
as.NoError(err)
t.Log("Generated and dumped chunk proof 1")
for i, dir := range chunkDirs {
if dir.IsDir() {
chunkPath := filepath.Join(*batchDirPath, dir.Name())
chunkProof2, err := chunkProverCore.ProveChunk("chunk_proof2", chunkTrace2)
as.NoError(err)
t.Log("Generated and dumped chunk proof 2")
chunkTrace := readChunkTrace(chunkPath, as)
t.Logf("Loaded chunk trace %d", i+1)
chunkInfo, err := chunkProverCore.TracesToChunkInfo(chunkTrace)
as.NoError(err)
chunkInfos = append(chunkInfos, chunkInfo)
t.Logf("Converted to chunk info %d", i+1)
chunkProof, err := chunkProverCore.ProveChunk(fmt.Sprintf("chunk_proof%d", i+1), chunkTrace)
as.NoError(err)
chunkProofs = append(chunkProofs, chunkProof)
t.Logf("Generated and dumped chunk proof %d", i+1)
}
}
as.Equal(chunkProverCore.VK, readVk(*chunkVkPath, as))
t.Log("Chunk VKs must be equal after proving")
@@ -79,8 +92,6 @@ func TestFFI(t *testing.T) {
as.Equal(batchProverCore.VK, readVk(*batchVkPath, as))
t.Log("Batch VK must be available when init")
chunkInfos := []*message.ChunkInfo{chunkInfo1, chunkInfo2}
chunkProofs := []*message.ChunkProof{chunkProof1, chunkProof2}
_, err = batchProverCore.ProveBatch("batch_proof", chunkInfos, chunkProofs)
as.NoError(err)
t.Log("Generated and dumped batch proof")
@@ -88,20 +99,46 @@ func TestFFI(t *testing.T) {
as.Equal(batchProverCore.VK, readVk(*batchVkPath, as))
t.Log("Batch VKs must be equal after proving")
}
func readChunkTrace(filePat string, as *assert.Assertions) []*types.BlockTrace {
f, err := os.Open(filePat)
as.NoError(err)
defer func() {
as.NoError(f.Close())
}()
byt, err := io.ReadAll(f)
fileInfo, err := os.Stat(filePat)
as.NoError(err)
trace := &types.BlockTrace{}
as.NoError(json.Unmarshal(byt, trace))
var traces []*types.BlockTrace
return []*types.BlockTrace{trace}
readFile := func(path string) {
f, err := os.Open(path)
as.NoError(err)
defer func() {
as.NoError(f.Close())
}()
byt, err := io.ReadAll(f)
as.NoError(err)
trace := &types.BlockTrace{}
as.NoError(json.Unmarshal(byt, trace))
traces = append(traces, trace)
}
if fileInfo.IsDir() {
files, err := os.ReadDir(filePat)
as.NoError(err)
// Sort files alphabetically
sort.Slice(files, func(i, j int) bool {
return files[i].Name() < files[j].Name()
})
for _, file := range files {
if !file.IsDir() {
readFile(filepath.Join(filePat, file.Name()))
}
}
} else {
readFile(filePat)
}
return traces
}
func readVk(filePat string, as *assert.Assertions) string {

View File

@@ -82,7 +82,7 @@ func NewProver(ctx context.Context, cfg *config.Config) (*Prover, error) {
}
log.Info("init prover_core successfully!")
coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, priv)
coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, cfg.HardForkName, priv)
if err != nil {
return nil, err
}
@@ -178,8 +178,7 @@ func (r *Prover) proveAndSubmit() error {
func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
// prepare the request
req := &client.GetTaskRequest{
HardForkName: r.cfg.HardForkName,
TaskType: r.Type(),
TaskType: r.Type(),
// we may not be able to get the vk the first time, so we pass the vk to the coordinator on every getTask
// instead of passing it once at login
VK: r.proverCore.VK,

View File

@@ -76,7 +76,7 @@ func action(ctx *cli.Context) error {
}
})
log.Info("Start event-watcher successfully")
log.Info("Start event-watcher successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

View File

@@ -109,7 +109,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions
log.Info("Start gas-oracle successfully")
log.Info("Start gas-oracle successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

View File

@@ -7,19 +7,19 @@ import (
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/utils"
"scroll-tech/rollup/internal/config"
"scroll-tech/common/cmd"
"scroll-tech/common/testcontainers"
"scroll-tech/common/utils"
)
// MockApp mockApp-test client manager.
type MockApp struct {
Config *config.Config
base *docker.App
Config *config.Config
testApps *testcontainers.TestcontainerApps
mockApps map[utils.MockAppName]docker.AppAPI
mockApps map[utils.MockAppName]*cmd.Cmd
originFile string
rollupFile string
@@ -27,13 +27,12 @@ type MockApp struct {
args []string
}
// NewRollupApp return a new rollupApp manager, name mush be one them.
func NewRollupApp(base *docker.App, file string) *MockApp {
rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", base.Timestamp)
// NewRollupApp returns a new rollupApp manager.
func NewRollupApp(testApps *testcontainers.TestcontainerApps, file string) *MockApp {
rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", testApps.Timestamp)
rollupApp := &MockApp{
base: base,
mockApps: make(map[utils.MockAppName]docker.AppAPI),
testApps: testApps,
mockApps: make(map[utils.MockAppName]*cmd.Cmd),
originFile: file,
rollupFile: rollupFile,
args: []string{"--log.debug", "--config", rollupFile},
@@ -69,7 +68,7 @@ func (b *MockApp) WaitExit() {
for _, app := range b.mockApps {
app.WaitExit()
}
b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
b.mockApps = make(map[utils.MockAppName]*cmd.Cmd)
}
// Free stops and releases the rollup mocked apps.
@@ -80,18 +79,29 @@ func (b *MockApp) Free() {
// MockConfig creates a new rollup config.
func (b *MockApp) MockConfig(store bool) error {
base := b.base
// Load origin rollup config file.
cfg, err := config.NewConfig(b.originFile)
if err != nil {
return err
}
cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.DBConfig.DSN = base.DBImg.Endpoint()
l1GethEndpoint, err := b.testApps.GetL1GethEndPoint()
if err != nil {
return err
}
l2GethEndpoint, err := b.testApps.GetL2GethEndPoint()
if err != nil {
return err
}
dbEndpoint, err := b.testApps.GetDBEndPoint()
if err != nil {
return err
}
cfg.L1Config.Endpoint = l1GethEndpoint
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2GethEndpoint
cfg.L2Config.Endpoint = l2GethEndpoint
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1GethEndpoint
cfg.DBConfig.DSN = dbEndpoint
b.Config = cfg
if !store {

View File

@@ -115,7 +115,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")
log.Info("Start rollup-relayer successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

View File

@@ -42,4 +42,7 @@ type BatchProposerConfig struct {
MaxL1CommitCalldataSizePerBatch uint64 `json:"max_l1_commit_calldata_size_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
EnableTestEnvSamplingFeature bool `json:"enable_test_env_sampling_feature,omitempty"`
SamplingPercentage uint64 `json:"sampling_percentage,omitempty"`
}
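
Note: both new fields are tagged omitempty, so existing configs behave exactly as before. A hedged sketch of opting in, with illustrative values (zero values stand in for the remaining proposer settings):

func sampledBatchProposerConfig() *config.BatchProposerConfig {
	return &config.BatchProposerConfig{
		EnableTestEnvSamplingFeature: true, // test-env-only feature flag
		SamplingPercentage:           30,   // prove batches with index%100 < 30
	}
}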

View File

@@ -64,6 +64,9 @@ type RelayerConfig struct {
EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
EnableTestEnvSamplingFeature bool `json:"enable_test_env_sampling_feature,omitempty"`
SamplingPercentage uint64 `json:"sampling_percentage,omitempty"`
}
// GasOracleConfig The config for updating gas price oracle.
@@ -128,6 +131,10 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
return fmt.Errorf("error converting and checking finalize sender private key: %w", err)
}
if r.SamplingPercentage == 0 {
r.SamplingPercentage = 100
}
return nil
}

View File

@@ -464,8 +464,9 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", batch.Hash)
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
if err := r.finalizeBatch(batch, true); err != nil {
log.Error("Failed to finalize batch with proof", "index", batch.Index, "hash", batch.Hash, "err", err)
skipProof := r.cfg.EnableTestEnvSamplingFeature && ((batch.Index % 100) >= r.cfg.SamplingPercentage)
if err := r.finalizeBatch(batch, !skipProof); err != nil {
log.Error("Failed to finalize batch", "index", batch.Index, "hash", batch.Hash, "withProof", !skipProof, "err", err)
}
case types.ProvingTaskFailed:
@@ -585,6 +586,24 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err)
return err
}
// // Updating the proving status when finalizing without proof, thus the coordinator could omit this task.
// // it isn't a necessary step, so don't put in a transaction with UpdateFinalizeTxHashAndRollupStatus
// if !withProof {
// txErr := r.db.Transaction(func(tx *gorm.DB) error {
// if updateErr := r.batchOrm.UpdateProvingStatus(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
// return updateErr
// }
// if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
// return updateErr
// }
// return nil
// })
// if txErr != nil {
// log.Error("Updating chunk and batch proving status when finalizing without proof failure", "batchHash", dbBatch.Hash, "err", txErr)
// }
// }
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
return nil
}
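
Note: the sampling predicate is deterministic in the batch index: with SamplingPercentage = p, batches whose index modulo 100 falls below p are proven and the rest are finalized without proof, and per the relayer config hunk above an unset p is normalized to 100, i.e. prove everything. A sketch of the shared check:

// skipProof mirrors the predicate used in ProcessCommittedBatches and in the
// batch proposer's updateDBBatchInfo: with p = 30, indices 0-29, 100-129, ...
// of every hundred are proven and the other 70 are skipped.
func skipProof(batchIndex, samplingPercentage uint64, enabled bool) bool {
	return enabled && (batchIndex%100) >= samplingPercentage
}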

View File

@@ -7,6 +7,7 @@ import (
"net/http"
"strings"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
"github.com/gin-gonic/gin"
@@ -181,9 +182,9 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)
batch := &encoding.Batch{
@@ -200,11 +201,30 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
assert.NoError(t, err)
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
time.Sleep(time.Second)
batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
if batchErr != nil {
return false
}
chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
if chunkErr != nil {
return false
}
batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing &&
types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified
chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified
return batchStatus && chunkStatus
})
assert.True(t, ok)
relayer.StopSenders()

View File

@@ -14,8 +14,8 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
"scroll-tech/common/docker"
dockercompose "scroll-tech/common/docker-compose/l1"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
@@ -26,7 +26,7 @@ var (
// config
cfg *config.Config
base *docker.App
testApps *testcontainers.TestcontainerApps
posL1TestEnv *dockercompose.PoSL1TestEnv
// l2geth client
@@ -53,16 +53,25 @@ func setupEnv(t *testing.T) {
cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err)
base.RunL2Geth(t)
base.RunDBImage(t)
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
assert.NoError(t, err, "failed to create PoS L1 test environment")
assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
assert.NoError(t, testApps.StartL2GethContainer())
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)
dsn, err := testApps.GetDBEndPoint()
assert.NoError(t, err)
cfg.DBConfig = &database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
DSN: dsn,
DriverName: "postgres",
MaxOpenNum: 200,
MaxIdleNum: 20,
}
port, err := rand.Int(rand.Reader, big.NewInt(10000))
assert.NoError(t, err)
@@ -70,7 +79,7 @@ func setupEnv(t *testing.T) {
cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort
// Create l2geth client.
l2Cli, err = base.L2Client()
l2Cli, err = testApps.GetL2GethClient()
assert.NoError(t, err)
templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
@@ -97,19 +106,14 @@ func setupEnv(t *testing.T) {
}
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
base.Free()
var err error
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
if err != nil {
log.Crit("failed to create PoS L1 test environment", "err", err)
}
if err := posL1TestEnv.Start(); err != nil {
log.Crit("failed to start PoS L1 test environment", "err", err)
}
defer posL1TestEnv.Stop()
defer func() {
if testApps != nil {
testApps.Free()
}
if posL1TestEnv != nil {
posL1TestEnv.Stop()
}
}()
m.Run()
}

View File

@@ -26,9 +26,8 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker"
dockercompose "scroll-tech/common/docker-compose/l1"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/database/migrate"
@@ -38,12 +37,10 @@ import (
"scroll-tech/rollup/mock_bridge"
)
const TXBatch = 50
var (
privateKey *ecdsa.PrivateKey
cfg *config.Config
base *docker.App
testApps *testcontainers.TestcontainerApps
posL1TestEnv *dockercompose.PoSL1TestEnv
txTypes = []string{"LegacyTx", "DynamicFeeTx", "DynamicFeeTx"}
txBlob = []*kzg4844.Blob{nil, nil, randBlob()}
@@ -53,19 +50,14 @@ var (
)
func TestMain(m *testing.M) {
base = docker.NewDockerApp()
defer base.Free()
var err error
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
if err != nil {
log.Crit("failed to create PoS L1 test environment", "err", err)
}
if err := posL1TestEnv.Start(); err != nil {
log.Crit("failed to start PoS L1 test environment", "err", err)
}
defer posL1TestEnv.Stop()
defer func() {
if testApps != nil {
testApps.Free()
}
if posL1TestEnv != nil {
posL1TestEnv.Stop()
}
}()
m.Run()
}
@@ -81,17 +73,18 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
privateKey = priv
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
assert.NoError(t, err, "failed to create PoS L1 test environment")
assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
assert.NoError(t, testApps.StartL1GethContainer())
assert.NoError(t, testApps.StartL2GethContainer())
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
base.RunDBImage(t)
db, err = database.InitDB(
&database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
},
)
db, err = testApps.GetGormDBClient()
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)

View File

@@ -14,6 +14,7 @@ import (
"gorm.io/gorm"
"scroll-tech/common/forks"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/rollup/internal/config"
@@ -37,6 +38,7 @@ type BatchProposer struct {
gasCostIncreaseMultiplier float64
forkMap map[uint64]bool
cfg *config.BatchProposerConfig
chainCfg *params.ChainConfig
batchProposerCircleTotal prometheus.Counter
@@ -74,6 +76,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
forkMap: forkMap,
cfg: cfg,
chainCfg: chainCfg,
batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
@@ -144,6 +147,27 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", batch.Hash, "error", dbErr)
return dbErr
}
skipProof := false
if p.cfg.EnableTestEnvSamplingFeature && ((batch.Index % 100) >= p.cfg.SamplingPercentage) {
skipProof = true
}
if skipProof {
dbErr = p.batchOrm.UpdateProvingStatus(p.ctx, batch.Hash, types.ProvingTaskVerified, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.updateBatchInfoInDB update batch proving_status failure",
"batch hash", batch.Hash, "error", dbErr)
return dbErr
}
dbErr = p.chunkOrm.UpdateProvingStatusInRange(p.ctx, batch.StartChunkIndex, batch.EndChunkIndex, types.ProvingTaskVerified, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.updateBatchInfoInDB update chunk proving_status failure",
"start chunk index", batch.StartChunkIndex, "end chunk index", batch.EndChunkIndex,
"batch hash", batch.Hash, "error", dbErr)
return dbErr
}
}
return nil
})
if err != nil {
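The sampling logic above skips proof generation for a share of batches in test environments: with EnableTestEnvSamplingFeature on, only batches whose Index % 100 falls below SamplingPercentage are proven, and the rest (together with their chunks) are marked verified immediately. A sketch of the check, assuming the two fields live on BatchProposerConfig as the code above implies (the JSON tags are guesses, not confirmed by this diff):

// Assumed shape of the new config fields; only the field names appear in the
// code above, the JSON tags are illustrative.
type BatchProposerConfig struct {
    EnableTestEnvSamplingFeature bool   `json:"enable_test_env_sampling_feature"`
    SamplingPercentage           uint64 `json:"sampling_percentage"`
}

// shouldSkipProof mirrors the condition in updateDBBatchInfo: with
// SamplingPercentage = 10, batches with Index%100 in [0, 9] still get proven,
// and the remaining 90% are marked ProvingTaskVerified without a proof.
func shouldSkipProof(cfg *BatchProposerConfig, batchIndex uint64) bool {
    return cfg.EnableTestEnvSamplingFeature && (batchIndex%100) >= cfg.SamplingPercentage
}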

View File

@@ -27,7 +27,7 @@ import (
func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
db := setupDB(t)
- client, err := ethclient.Dial(base.L1gethImg.Endpoint())
+ client, err := testApps.GetL1GethClient()
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)

View File

@@ -11,9 +11,8 @@ import (
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/encoding"
"scroll-tech/database/migrate"
"scroll-tech/rollup/internal/config"
@@ -23,7 +22,7 @@ var (
// config
cfg *config.Config
- base *docker.App
+ testApps *testcontainers.TestcontainerApps
// l2geth client
l2Cli *ethclient.Client
@@ -42,19 +41,27 @@ func setupEnv(t *testing.T) (err error) {
cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err)
- base.RunImages(t)
+ testApps = testcontainers.NewTestcontainerApps()
+ assert.NoError(t, testApps.StartPostgresContainer())
+ assert.NoError(t, testApps.StartL1GethContainer())
+ assert.NoError(t, testApps.StartL2GethContainer())
- cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
- cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
+ cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL1GethEndPoint()
+ assert.NoError(t, err)
+ cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
+ assert.NoError(t, err)
+ dsn, err := testApps.GetDBEndPoint()
+ assert.NoError(t, err)
cfg.DBConfig = &database.Config{
- DSN: base.DBConfig.DSN,
- DriverName: base.DBConfig.DriverName,
- MaxOpenNum: base.DBConfig.MaxOpenNum,
- MaxIdleNum: base.DBConfig.MaxIdleNum,
+ DSN: dsn,
+ DriverName: "postgres",
+ MaxOpenNum: 200,
+ MaxIdleNum: 20,
}
// Create l2geth client.
- l2Cli, err = base.L2Client()
+ l2Cli, err = testApps.GetL2GethClient()
assert.NoError(t, err)
block1 = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
@@ -73,11 +80,12 @@ func setupDB(t *testing.T) *gorm.DB {
}
func TestMain(m *testing.M) {
- base = docker.NewDockerApp()
+ defer func() {
+ if testApps != nil {
+ testApps.Free()
+ }
+ }()
m.Run()
- base.Free()
}
func TestFunction(t *testing.T) {

View File

@@ -25,6 +25,7 @@ type Batch struct {
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -53,6 +54,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// blob
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"`
@@ -257,6 +262,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
newBatch := Batch{
Index: batch.Index,
Hash: batchMeta.BatchHash.Hex(),
DataHash: batchMeta.BatchDataHash.Hex(),
StartChunkHash: batchMeta.StartChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: batchMeta.EndChunkHash.Hex(),
@@ -271,6 +277,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
OracleStatus: int16(types.GasOraclePending),
TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
BlobDataProof: batchMeta.BatchBlobDataProof,
BlobSize: metrics.L1CommitBlobSize,
}
db := o.db
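The new DataHash, BlobDataProof and BlobSize columns persist what GetBatchMetadata now computes. A hypothetical read-side helper (not part of this diff) showing how the blob columns would round-trip, written in the style of the other Batch getters in this file:

// GetBlobInfoByHash is a hypothetical helper illustrating the new columns;
// it is not part of this change set.
func (o *Batch) GetBlobInfoByHash(ctx context.Context, hash string) ([]byte, uint64, error) {
    var batch Batch
    db := o.db.WithContext(ctx).Model(&Batch{}).Where("hash = ?", hash)
    if err := db.First(&batch).Error; err != nil {
        return nil, 0, fmt.Errorf("Batch.GetBlobInfoByHash error: %w, batch hash: %v", err, hash)
    }
    return batch.BlobDataProof, batch.BlobSize, nil
}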

View File

@@ -44,6 +44,10 @@ type Chunk struct {
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// blob
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -140,6 +144,20 @@ func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) (
return chunks, nil
}
// GetChunksByBatchHash retrieves all chunks associated with the given batch hash.
// Note: this helper is intended for tests only.
func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash = ?", batchHash)
var chunks []*Chunk
if err := db.Find(&chunks).Error; err != nil {
return nil, fmt.Errorf("Chunk.GetChunksByBatchHash error: %w", err)
}
return chunks, nil
}
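A usage sketch for the new test helper; chunkOrm, batchHash and the expected status are placeholders, but this is the kind of assertion the sampling tests would make:

// Illustrative test usage: check that every chunk of a sampled-out batch was
// marked verified together with its batch.
chunks, err := chunkOrm.GetChunksByBatchHash(context.Background(), batchHash)
assert.NoError(t, err)
for _, chunk := range chunks {
    assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(chunk.ProvingStatus))
}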
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
@@ -198,6 +216,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
ParentChunkStateRoot: parentChunkStateRoot,
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
CrcMax: metrics.CrcMax,
BlobSize: metrics.L1CommitBlobSize,
}
db := o.db
@@ -242,6 +262,34 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
return nil
}
// UpdateProvingStatusByBatchHash updates the proving_status for chunks within the specified batch_hash
func (o *Chunk) UpdateProvingStatusByBatchHash(ctx context.Context, batchHash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("batch_hash = ?", batchHash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatusByBatchHash error: %w, batch hash: %v, status: %v", err, batchHash, status.String())
}
return nil
}
// UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive).
// The range is closed, i.e., it includes both start and end indices.
func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error {
@@ -258,3 +306,28 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
}
return nil
}
// UpdateProvingStatusInRange updates the proving_status for chunks whose index falls within the specified range (inclusive on both ends).
func (o *Chunk) UpdateProvingStatusInRange(ctx context.Context, startIndex uint64, endIndex uint64, status types.ProvingStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("index >= ? AND index <= ?", startIndex, endIndex)
updateFields := make(map[string]interface{})
updateFields["proving_status"] = int(status)
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Chunk.UpdateProvingStatusInRange error: %w, start index: %v, end index: %v, status: %v", err, startIndex, endIndex, status.String())
}
return nil
}
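Both new update methods accept an optional dbTX so the proposer can flip batch and chunk statuses atomically. A sketch of the calling pattern (compare updateDBBatchInfo earlier in this compare); the orm variables, hash and indices are assumed to be initialized elsewhere:

// Batch and chunk proving statuses must move together, hence one transaction.
err := db.Transaction(func(dbTX *gorm.DB) error {
    if err := batchOrm.UpdateProvingStatus(ctx, batchHash, types.ProvingTaskVerified, dbTX); err != nil {
        return err
    }
    return chunkOrm.UpdateProvingStatusInRange(ctx, startChunkIndex, endChunkIndex, types.ProvingTaskVerified, dbTX)
})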

View File

@@ -12,17 +12,17 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/database/migrate"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/common/types/encoding/codecv1"
"scroll-tech/database/migrate"
)
var (
- base *docker.App
+ testApps *testcontainers.TestcontainerApps
db *gorm.DB
l2BlockOrm *L2Block
@@ -36,23 +36,23 @@ var (
func TestMain(m *testing.M) {
t := &testing.T{}
+ defer func() {
+ if testApps != nil {
+ testApps.Free()
+ }
+ tearDownEnv(t)
+ }()
setupEnv(t)
- defer tearDownEnv(t)
m.Run()
}
func setupEnv(t *testing.T) {
- base = docker.NewDockerApp()
- base.RunDBImage(t)
var err error
- db, err = database.InitDB(
- &database.Config{
- DSN: base.DBConfig.DSN,
- DriverName: base.DBConfig.DriverName,
- MaxOpenNum: base.DBConfig.MaxOpenNum,
- MaxIdleNum: base.DBConfig.MaxIdleNum,
- },
- )
+ testApps = testcontainers.NewTestcontainerApps()
+ assert.NoError(t, testApps.StartPostgresContainer())
+ db, err = testApps.GetGormDBClient()
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
@@ -80,7 +80,6 @@ func tearDownEnv(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
sqlDB.Close()
- base.Free()
}
func TestL1BlockOrm(t *testing.T) {

View File

@@ -191,10 +191,12 @@ func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, code
// BatchMetadata represents the metadata of a batch.
type BatchMetadata struct {
- BatchHash common.Hash
- BatchBytes []byte
- StartChunkHash common.Hash
- EndChunkHash common.Hash
+ BatchHash common.Hash
+ BatchDataHash common.Hash
+ BatchBlobDataProof []byte
+ BatchBytes []byte
+ StartChunkHash common.Hash
+ EndChunkHash common.Hash
}
// GetBatchMetadata retrieves the metadata of a batch.
@@ -212,9 +214,11 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
}
+ // BatchBlobDataProof is left as empty for codecv0.
batchMeta := &BatchMetadata{
- BatchHash: daBatch.Hash(),
- BatchBytes: daBatch.Encode(),
+ BatchHash: daBatch.Hash(),
+ BatchDataHash: daBatch.DataHash,
+ BatchBytes: daBatch.Encode(),
}
startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
@@ -243,9 +247,16 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
}
+ blobDataProof, err := daBatch.BlobDataProof()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get codecv1 blob data proof: %w", err)
+ }
batchMeta := &BatchMetadata{
- BatchHash: daBatch.Hash(),
- BatchBytes: daBatch.Encode(),
+ BatchHash: daBatch.Hash(),
+ BatchDataHash: daBatch.DataHash,
+ BatchBlobDataProof: blobDataProof,
+ BatchBytes: daBatch.Encode(),
}
startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
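An illustrative call site for the extended metadata (batch is assumed to be an *encoding.Batch in scope): with CodecV1 the result carries the KZG blob data proof and the data hash, while for CodecV0 BatchBlobDataProof stays empty, as the comment above notes.

// Sketch only: log the new metadata fields after computing them.
batchMeta, err := GetBatchMetadata(batch, encoding.CodecV1)
if err != nil {
    log.Error("failed to get batch metadata", "index", batch.Index, "err", err)
    return
}
log.Info("batch metadata",
    "hash", batchMeta.BatchHash.Hex(),
    "dataHash", batchMeta.BatchDataHash.Hex(),
    "blobProofLen", len(batchMeta.BatchBlobDataProof))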

View File

@@ -11,6 +11,13 @@ import (
"testing"
"time"
"scroll-tech/database/migrate"
"scroll-tech/common/database"
dockercompose "scroll-tech/common/docker-compose/l1"
tc "scroll-tech/common/testcontainers"
"scroll-tech/common/utils"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
@@ -22,19 +29,12 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/docker"
dockercompose "scroll-tech/common/docker-compose/l1"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
bcmd "scroll-tech/rollup/cmd"
"scroll-tech/rollup/mock_bridge"
)
var (
- base *docker.App
+ testApps *tc.TestcontainerApps
rollupApp *bcmd.MockApp
posL1TestEnv *dockercompose.PoSL1TestEnv
@@ -47,11 +47,14 @@ var (
)
func setupDB(t *testing.T) *gorm.DB {
+ dsn, err := testApps.GetDBEndPoint()
+ assert.NoError(t, err)
cfg := &database.Config{
- DSN: base.DBConfig.DSN,
- DriverName: base.DBConfig.DriverName,
- MaxOpenNum: base.DBConfig.MaxOpenNum,
- MaxIdleNum: base.DBConfig.MaxIdleNum,
+ DSN: dsn,
+ DriverName: "postgres",
+ MaxOpenNum: 200,
+ MaxIdleNum: 20,
}
db, err := database.InitDB(cfg)
assert.NoError(t, err)
@@ -62,22 +65,17 @@ func setupDB(t *testing.T) *gorm.DB {
}
func TestMain(m *testing.M) {
- base = docker.NewDockerApp()
- defer base.Free()
- rollupApp = bcmd.NewRollupApp(base, "../conf/config.json")
- defer rollupApp.Free()
- var err error
- posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
- if err != nil {
- log.Crit("failed to create PoS L1 test environment", "err", err)
- }
- if err := posL1TestEnv.Start(); err != nil {
- log.Crit("failed to start PoS L1 test environment", "err", err)
- }
- defer posL1TestEnv.Stop()
+ defer func() {
+ if testApps != nil {
+ testApps.Free()
+ }
+ if rollupApp != nil {
+ rollupApp.Free()
+ }
+ if posL1TestEnv != nil {
+ posL1TestEnv.Stop()
+ }
+ }()
m.Run()
}
@@ -86,19 +84,26 @@ func setupEnv(t *testing.T) {
glogger.Verbosity(log.LvlInfo)
log.Root().SetHandler(glogger)
- var err error
+ var (
+ err error
+ l1GethChainID *big.Int
+ )
+ posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
+ assert.NoError(t, err, "failed to create PoS L1 test environment")
+ assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")
+ testApps = tc.NewTestcontainerApps()
+ assert.NoError(t, testApps.StartPostgresContainer())
+ assert.NoError(t, testApps.StartL1GethContainer())
+ assert.NoError(t, testApps.StartL2GethContainer())
+ rollupApp = bcmd.NewRollupApp(testApps, "../conf/config.json")
l1Client, err = posL1TestEnv.L1Client()
assert.NoError(t, err)
- chainID, err := l1Client.ChainID(context.Background())
+ l2Client, err = testApps.GetL2GethClient()
assert.NoError(t, err)
- l1Auth, err = bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, chainID)
- assert.NoError(t, err)
- rollupApp.Config.L1Config.Endpoint = posL1TestEnv.Endpoint()
- rollupApp.Config.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
- base.RunImages(t)
- l2Client, err = base.L2Client()
+ l1GethChainID, err = l1Client.ChainID(context.Background())
assert.NoError(t, err)
l1Cfg, l2Cfg := rollupApp.Config.L1Config, rollupApp.Config.L2Config
@@ -107,6 +112,11 @@ func setupEnv(t *testing.T) {
l2Cfg.Confirmations = 0
l2Cfg.RelayerConfig.SenderConfig.Confirmations = 0
+ l1Auth, err = bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, l1GethChainID)
+ assert.NoError(t, err)
+ rollupApp.Config.L1Config.Endpoint = posL1TestEnv.Endpoint()
+ rollupApp.Config.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
port, err := rand.Int(rand.Reader, big.NewInt(10000))
assert.NoError(t, err)
svrPort := strconv.FormatInt(port.Int64()+40000, 10)

View File

@@ -20,11 +20,11 @@ var (
greeterAddress = common.HexToAddress("0x7363726f6c6c6c20000000000000000000000015")
)
- func TestERC20(t *testing.T) {
- base.RunL2Geth(t)
+ func testERC20(t *testing.T) {
+ assert.NoError(t, testApps.StartL2GethContainer())
time.Sleep(time.Second * 3)
- l2Cli, err := base.L2Client()
+ l2Cli, err := testApps.GetL2GethClient()
assert.Nil(t, err)
token, err := erc20.NewERC20Mock(erc20Address, l2Cli)
@@ -32,7 +32,9 @@ func TestERC20(t *testing.T) {
privKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
assert.NoError(t, err)
- auth, err := bind.NewKeyedTransactorWithChainID(privKey, base.L2gethImg.ChainID())
+ chainID, err := l2Cli.ChainID(context.Background())
+ assert.NoError(t, err)
+ auth, err := bind.NewKeyedTransactorWithChainID(privKey, chainID)
assert.NoError(t, err)
authBls0, err := token.BalanceOf(nil, auth.From)
@@ -45,7 +47,8 @@ func TestERC20(t *testing.T) {
value := big.NewInt(1000)
tx, err := token.Transfer(auth, erc20Address, value)
assert.NoError(t, err)
- bind.WaitMined(context.Background(), l2Cli, tx)
+ _, err = bind.WaitMined(context.Background(), l2Cli, tx)
+ assert.NoError(t, err)
authBls1, err := token.BalanceOf(nil, auth.From)
assert.NoError(t, err)
@@ -58,12 +61,14 @@ func TestERC20(t *testing.T) {
assert.Equal(t, tokenBls1.Int64(), tokenBls0.Add(tokenBls0, value).Int64())
}
- func TestGreeter(t *testing.T) {
- base.RunL2Geth(t)
- l2Cli, err := base.L2Client()
+ func testGreeter(t *testing.T) {
+ assert.NoError(t, testApps.StartL2GethContainer())
+ l2Cli, err := testApps.GetL2GethClient()
assert.Nil(t, err)
- auth, err := bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, base.L2gethImg.ChainID())
+ chainID, err := l2Cli.ChainID(context.Background())
+ assert.NoError(t, err)
+ auth, err := bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, chainID)
assert.NoError(t, err)
token, err := greeter.NewGreeter(greeterAddress, l2Cli)

View File

@@ -10,52 +10,63 @@ import (
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
"scroll-tech/integration-test/orm"
rapp "scroll-tech/prover/cmd/app"
"scroll-tech/database/migrate"
capp "scroll-tech/coordinator/cmd/api/app"
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/encoding"
"scroll-tech/common/utils"
"scroll-tech/common/version"
capp "scroll-tech/coordinator/cmd/api/app"
"scroll-tech/database/migrate"
"scroll-tech/integration-test/orm"
rapp "scroll-tech/prover/cmd/app"
bcmd "scroll-tech/rollup/cmd"
)
var (
- base *docker.App
+ testApps *testcontainers.TestcontainerApps
rollupApp *bcmd.MockApp
)
func TestMain(m *testing.M) {
- base = docker.NewDockerApp()
- rollupApp = bcmd.NewRollupApp(base, "../../rollup/conf/config.json")
+ defer func() {
+ if testApps != nil {
+ testApps.Free()
+ }
+ if rollupApp != nil {
+ rollupApp.Free()
+ }
+ }()
m.Run()
- rollupApp.Free()
- base.Free()
}
- func TestCoordinatorProverInteraction(t *testing.T) {
- // Start postgres docker containers
- base.RunL2Geth(t)
- base.RunDBImage(t)
+ func setupEnv(t *testing.T) {
+ testApps = testcontainers.NewTestcontainerApps()
+ assert.NoError(t, testApps.StartPostgresContainer())
+ assert.NoError(t, testApps.StartL1GethContainer())
+ assert.NoError(t, testApps.StartL2GethContainer())
+ rollupApp = bcmd.NewRollupApp(testApps, "../../rollup/conf/config.json")
+ }
- // Init data
- dbCfg := &database.Config{
- DSN: base.DBConfig.DSN,
- DriverName: base.DBConfig.DriverName,
- MaxOpenNum: base.DBConfig.MaxOpenNum,
- MaxIdleNum: base.DBConfig.MaxIdleNum,
- }
+ func TestFunction(t *testing.T) {
+ setupEnv(t)
+ t.Run("TestCoordinatorProverInteraction", testCoordinatorProverInteraction)
+ t.Run("TestProverReLogin", testProverReLogin)
+ t.Run("TestERC20", testERC20)
+ t.Run("TestGreeter", testGreeter)
+ }
- db, err := database.InitDB(dbCfg)
+ func setupDB(t *testing.T) *gorm.DB {
+ db, err := testApps.GetGormDBClient()
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
+ return db
+ }
+ func testCoordinatorProverInteraction(t *testing.T) {
+ db := setupDB(t)
sqlDB, err := db.DB()
assert.NoError(t, err)
@@ -66,7 +77,7 @@ func TestCoordinatorProverInteraction(t *testing.T) {
l2BlockOrm := orm.NewL2Block(db)
// Connect to l2geth client
- l2Client, err := base.L2Client()
+ l2Client, err := testApps.GetL2GethClient()
if err != nil {
log.Fatalf("Failed to connect to the l2geth client: %v", err)
}
@@ -111,10 +122,9 @@ func TestCoordinatorProverInteraction(t *testing.T) {
assert.NoError(t, err)
t.Log(version.Version)
- base.Timestamp = time.Now().Nanosecond()
- coordinatorApp := capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json", "./genesis.json")
- chunkProverApp := rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
- batchProverApp := rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
+ coordinatorApp := capp.NewCoordinatorApp(testApps, "../../coordinator/conf/config.json", "./genesis.json")
+ chunkProverApp := rapp.NewProverApp(testApps, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
+ batchProverApp := rapp.NewProverApp(testApps, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
defer coordinatorApp.Free()
defer chunkProverApp.Free()
defer batchProverApp.Free()
@@ -139,17 +149,16 @@ func TestCoordinatorProverInteraction(t *testing.T) {
coordinatorApp.WaitExit()
}
- func TestProverReLogin(t *testing.T) {
- // Start postgres docker containers.
- base.RunL2Geth(t)
- base.RunDBImage(t)
+ func testProverReLogin(t *testing.T) {
+ client, err := testApps.GetGormDBClient()
+ assert.NoError(t, err)
+ db, err := client.DB()
+ assert.NoError(t, err)
+ assert.NoError(t, migrate.ResetDB(db))
- assert.NoError(t, migrate.ResetDB(base.DBClient(t)))
- base.Timestamp = time.Now().Nanosecond()
- coordinatorApp := capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json", "./genesis.json")
- chunkProverApp := rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
- batchProverApp := rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
+ coordinatorApp := capp.NewCoordinatorApp(testApps, "../../coordinator/conf/config.json", "./genesis.json")
+ chunkProverApp := rapp.NewProverApp(testApps, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
+ batchProverApp := rapp.NewProverApp(testApps, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
defer coordinatorApp.Free()
defer chunkProverApp.Free()
defer batchProverApp.Free()
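The integration suite now runs behind a single TestFunction entry point so one container set serves every subtest, with teardown centralized in TestMain's defer. A hypothetical extra case showing how a new subtest would slot in (it would be registered in TestFunction alongside the others):

// Hypothetical subtest, not part of this diff: it reuses the shared containers
// and resets the database via setupDB so it stays independent of its siblings.
func testNewCase(t *testing.T) {
    db := setupDB(t)
    assert.NotNil(t, db)
    l2Cli, err := testApps.GetL2GethClient()
    assert.NoError(t, err)
    chainID, err := l2Cli.ChainID(context.Background())
    assert.NoError(t, err)
    t.Logf("l2 chain id: %s", chainID)
    // ... drive the coordinator / prover flow under test here ...
}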

View File

@@ -21,6 +21,7 @@ type Batch struct {
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -49,6 +50,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// blob
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -151,6 +156,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
newBatch := Batch{
Index: batch.Index,
Hash: daBatch.Hash().Hex(),
DataHash: daBatch.DataHash.Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: endDAChunkHash.Hex(),
@@ -163,6 +169,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
BlobDataProof: nil, // mock value; this code path is only used in unit tests
BlobSize: 0, // mock value; this code path is only used in unit tests
}
db := o.db

View File

@@ -43,6 +43,10 @@ type Chunk struct {
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
// blob
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -150,6 +154,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
ParentChunkStateRoot: parentChunkStateRoot,
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
CrcMax: 0, // mock value; this code path is only used in unit tests
BlobSize: 0, // mock value; this code path is only used in unit tests
}
db := o.db

View File

@@ -4,9 +4,10 @@ import (
"context"
"encoding/json"
"fmt"
"scroll-tech/common/types/encoding"
"time"
"scroll-tech/common/types/encoding"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
)