Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits: 7 commits, v4.3.84...deploy_2_v
| Author | SHA1 | Date | |
|---|---|---|---|
| | 037616f509 | | |
| | b999e5b893 | | |
| | b3093e9eb6 | | |
| | 3d5250e52d | | |
| | b7324c76bc | | |
| | 6d6e98bd6e | | |
| | 9e35ce0ab4 | | |
@@ -23,6 +23,7 @@ type FetcherConfig struct {
	DAIGatewayAddr     string `json:"DAIGatewayAddr"`
	USDCGatewayAddr    string `json:"USDCGatewayAddr"`
	LIDOGatewayAddr    string `json:"LIDOGatewayAddr"`
	PufferGatewayAddr  string `json:"PufferGatewayAddr"`
	ERC721GatewayAddr  string `json:"ERC721GatewayAddr"`
	ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
	ScrollChainAddr    string `json:"ScrollChainAddr"`
@@ -93,6 +93,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
	}

	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
	}

	log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)

	f := &L1FetcherLogic{
@@ -85,7 +85,12 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient

	if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
		addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
		gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
	}

	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
	}

	log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
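The two fetcher hunks above both rely on the same guard: a gateway is only registered when its configured address is non-zero. A minimal, self-contained Go sketch of that guard follows, using the upstream go-ethereum `common` package instead of the scroll-tech fork, a trimmed-down `FetcherConfig`, and a placeholder address; it is an illustration of the pattern, not the project code.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// Trimmed-down config: only the two optional gateways from the diff above.
type FetcherConfig struct {
	LIDOGatewayAddr   string
	PufferGatewayAddr string
}

// buildLists mirrors the guard in NewL1FetcherLogic/NewL2FetcherLogic:
// an empty or unset address decodes to the zero address and is skipped.
func buildLists(cfg *FetcherConfig) (addressList, gatewayList []common.Address) {
	for _, raw := range []string{cfg.LIDOGatewayAddr, cfg.PufferGatewayAddr} {
		if addr := common.HexToAddress(raw); addr != (common.Address{}) {
			addressList = append(addressList, addr)
			gatewayList = append(gatewayList, addr)
		}
	}
	return addressList, gatewayList
}

func main() {
	addrs, gws := buildLists(&FetcherConfig{
		PufferGatewayAddr: "0x1111111111111111111111111111111111111111", // placeholder address
	})
	fmt.Println(len(addrs), len(gws)) // 1 1
}
```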
@@ -1,196 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/database"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
l1StartPort = 10000
|
||||
l2StartPort = 20000
|
||||
dbStartPort = 30000
|
||||
)
|
||||
|
||||
// AppAPI app interface.
|
||||
type AppAPI interface {
|
||||
IsRunning() bool
|
||||
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
|
||||
RunApp(waitResult func() bool)
|
||||
WaitExit()
|
||||
ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
|
||||
}
|
||||
|
||||
// App is collection struct of runtime docker images
|
||||
type App struct {
|
||||
L1gethImg GethImgInstance
|
||||
L2gethImg GethImgInstance
|
||||
DBImg ImgInstance
|
||||
|
||||
dbClient *sql.DB
|
||||
DBConfig *database.DBConfig
|
||||
DBConfigFile string
|
||||
|
||||
// common time stamp.
|
||||
Timestamp int
|
||||
}
|
||||
|
||||
// NewDockerApp returns new instance of dockerApp struct
|
||||
func NewDockerApp() *App {
|
||||
timestamp := time.Now().Nanosecond()
|
||||
app := &App{
|
||||
Timestamp: timestamp,
|
||||
L1gethImg: newTestL1Docker(),
|
||||
L2gethImg: newTestL2Docker(),
|
||||
DBImg: newTestDBDocker("postgres"),
|
||||
DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
|
||||
}
|
||||
if err := app.mockDBConfig(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return app
|
||||
}
|
||||
|
||||
// RunImages runs all images togather
|
||||
func (b *App) RunImages(t *testing.T) {
|
||||
b.RunDBImage(t)
|
||||
b.RunL1Geth(t)
|
||||
b.RunL2Geth(t)
|
||||
}
|
||||
|
||||
// RunDBImage starts postgres docker container.
|
||||
func (b *App) RunDBImage(t *testing.T) {
|
||||
if b.DBImg.IsRunning() {
|
||||
return
|
||||
}
|
||||
assert.NoError(t, b.DBImg.Start())
|
||||
|
||||
// try 5 times until the db is ready.
|
||||
ok := utils.TryTimes(10, func() bool {
|
||||
db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
|
||||
return err == nil && db != nil && db.Ping() == nil
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
// Free clear all running images, double check and recycle docker container.
|
||||
func (b *App) Free() {
|
||||
if b.L1gethImg.IsRunning() {
|
||||
_ = b.L1gethImg.Stop()
|
||||
}
|
||||
if b.L2gethImg.IsRunning() {
|
||||
_ = b.L2gethImg.Stop()
|
||||
}
|
||||
if b.DBImg.IsRunning() {
|
||||
_ = b.DBImg.Stop()
|
||||
_ = os.Remove(b.DBConfigFile)
|
||||
if !utils.IsNil(b.dbClient) {
|
||||
_ = b.dbClient.Close()
|
||||
b.dbClient = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RunL1Geth starts l1geth docker container.
|
||||
func (b *App) RunL1Geth(t *testing.T) {
|
||||
if b.L1gethImg.IsRunning() {
|
||||
return
|
||||
}
|
||||
assert.NoError(t, b.L1gethImg.Start())
|
||||
}
|
||||
|
||||
// L1Client returns a ethclient by dialing running l1geth
|
||||
func (b *App) L1Client() (*ethclient.Client, error) {
|
||||
if utils.IsNil(b.L1gethImg) {
|
||||
return nil, fmt.Errorf("l1 geth is not running")
|
||||
}
|
||||
client, err := ethclient.Dial(b.L1gethImg.Endpoint())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// RunL2Geth starts l2geth docker container.
|
||||
func (b *App) RunL2Geth(t *testing.T) {
|
||||
if b.L2gethImg.IsRunning() {
|
||||
return
|
||||
}
|
||||
assert.NoError(t, b.L2gethImg.Start())
|
||||
}
|
||||
|
||||
// L2Client returns a ethclient by dialing running l2geth
|
||||
func (b *App) L2Client() (*ethclient.Client, error) {
|
||||
if utils.IsNil(b.L2gethImg) {
|
||||
return nil, fmt.Errorf("l2 geth is not running")
|
||||
}
|
||||
client, err := ethclient.Dial(b.L2gethImg.Endpoint())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// DBClient create and return *sql.DB instance.
|
||||
func (b *App) DBClient(t *testing.T) *sql.DB {
|
||||
if !utils.IsNil(b.dbClient) {
|
||||
return b.dbClient
|
||||
}
|
||||
var (
|
||||
cfg = b.DBConfig
|
||||
err error
|
||||
)
|
||||
b.dbClient, err = sql.Open(cfg.DriverName, cfg.DSN)
|
||||
assert.NoError(t, err)
|
||||
b.dbClient.SetMaxOpenConns(cfg.MaxOpenNum)
|
||||
b.dbClient.SetMaxIdleConns(cfg.MaxIdleNum)
|
||||
assert.NoError(t, b.dbClient.Ping())
|
||||
return b.dbClient
|
||||
}
|
||||
|
||||
func (b *App) mockDBConfig() error {
|
||||
b.DBConfig = &database.DBConfig{
|
||||
DSN: "",
|
||||
DriverName: "postgres",
|
||||
MaxOpenNum: 200,
|
||||
MaxIdleNum: 20,
|
||||
}
|
||||
|
||||
if b.DBImg != nil {
|
||||
b.DBConfig.DSN = b.DBImg.Endpoint()
|
||||
}
|
||||
data, err := json.Marshal(b.DBConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
|
||||
}
|
||||
|
||||
func newTestL1Docker() GethImgInstance {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
return NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
|
||||
}
|
||||
|
||||
func newTestL2Docker() GethImgInstance {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
return NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
|
||||
}
|
||||
|
||||
func newTestDBDocker(driverName string) ImgInstance {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
|
||||
return NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
|
||||
}
|
||||
@@ -1,131 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
// ImgDB the postgres image manager.
|
||||
type ImgDB struct {
|
||||
image string
|
||||
name string
|
||||
id string
|
||||
|
||||
dbName string
|
||||
port int
|
||||
password string
|
||||
|
||||
running bool
|
||||
cmd *cmd.Cmd
|
||||
}
|
||||
|
||||
// NewImgDB return postgres db img instance.
|
||||
func NewImgDB(image, password, dbName string, port int) ImgInstance {
|
||||
img := &ImgDB{
|
||||
image: image,
|
||||
name: fmt.Sprintf("%s-%s_%d", image, dbName, port),
|
||||
password: password,
|
||||
dbName: dbName,
|
||||
port: port,
|
||||
}
|
||||
img.cmd = cmd.NewCmd("docker", img.prepare()...)
|
||||
return img
|
||||
}
|
||||
|
||||
// Start postgres db container.
|
||||
func (i *ImgDB) Start() error {
|
||||
id := GetContainerID(i.name)
|
||||
if id != "" {
|
||||
return fmt.Errorf("container already exist, name: %s", i.name)
|
||||
}
|
||||
i.running = i.isOk()
|
||||
if !i.running {
|
||||
_ = i.Stop()
|
||||
return fmt.Errorf("failed to start image: %s", i.image)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop the container.
|
||||
func (i *ImgDB) Stop() error {
|
||||
if !i.running {
|
||||
return nil
|
||||
}
|
||||
i.running = false
|
||||
|
||||
ctx := context.Background()
|
||||
// stop the running container.
|
||||
if i.id == "" {
|
||||
i.id = GetContainerID(i.name)
|
||||
}
|
||||
|
||||
timeoutSec := 3
|
||||
timeout := container.StopOptions{
|
||||
Timeout: &timeoutSec,
|
||||
}
|
||||
if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
// remove the stopped container.
|
||||
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
|
||||
}
|
||||
|
||||
// Endpoint return the dsn.
|
||||
func (i *ImgDB) Endpoint() string {
|
||||
return fmt.Sprintf("postgres://postgres:%s@localhost:%d/%s?sslmode=disable", i.password, i.port, i.dbName)
|
||||
}
|
||||
|
||||
// IsRunning returns docker container's running status.
|
||||
func (i *ImgDB) IsRunning() bool {
|
||||
return i.running
|
||||
}
|
||||
|
||||
func (i *ImgDB) prepare() []string {
|
||||
cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
|
||||
envs := []string{
|
||||
"-e", "POSTGRES_PASSWORD=" + i.password,
|
||||
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
|
||||
}
|
||||
|
||||
cmd = append(cmd, envs...)
|
||||
return append(cmd, i.image)
|
||||
}
|
||||
|
||||
func (i *ImgDB) isOk() bool {
|
||||
keyword := "database system is ready to accept connections"
|
||||
okCh := make(chan struct{}, 1)
|
||||
i.cmd.RegistFunc(keyword, func(buf string) {
|
||||
if strings.Contains(buf, keyword) {
|
||||
select {
|
||||
case okCh <- struct{}{}:
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
defer i.cmd.UnRegistFunc(keyword)
|
||||
// Start cmd in parallel.
|
||||
i.cmd.RunCmd(true)
|
||||
|
||||
select {
|
||||
case <-okCh:
|
||||
utils.TryTimes(20, func() bool {
|
||||
i.id = GetContainerID(i.name)
|
||||
return i.id != ""
|
||||
})
|
||||
case err := <-i.cmd.ErrChan:
|
||||
if err != nil {
|
||||
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
|
||||
}
|
||||
case <-time.After(time.Second * 20):
|
||||
return false
|
||||
}
|
||||
return i.id != ""
|
||||
}
|
||||
@@ -1,174 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
// ImgGeth the geth image manager include l1geth and l2geth.
|
||||
type ImgGeth struct {
|
||||
image string
|
||||
name string
|
||||
id string
|
||||
|
||||
volume string
|
||||
ipcPath string
|
||||
httpPort int
|
||||
wsPort int
|
||||
chainID *big.Int
|
||||
|
||||
running bool
|
||||
cmd *cmd.Cmd
|
||||
}
|
||||
|
||||
// NewImgGeth return geth img instance.
|
||||
func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
|
||||
img := &ImgGeth{
|
||||
image: image,
|
||||
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
|
||||
volume: volume,
|
||||
ipcPath: ipc,
|
||||
httpPort: hPort,
|
||||
wsPort: wPort,
|
||||
}
|
||||
img.cmd = cmd.NewCmd("docker", img.params()...)
|
||||
return img
|
||||
}
|
||||
|
||||
// Start run image and check if it is running healthily.
|
||||
func (i *ImgGeth) Start() error {
|
||||
id := GetContainerID(i.name)
|
||||
if id != "" {
|
||||
return fmt.Errorf("container already exist, name: %s", i.name)
|
||||
}
|
||||
i.running = i.isOk()
|
||||
if !i.running {
|
||||
_ = i.Stop()
|
||||
return fmt.Errorf("failed to start image: %s", i.image)
|
||||
}
|
||||
|
||||
// try 10 times to get chainID until is ok.
|
||||
utils.TryTimes(10, func() bool {
|
||||
client, err := ethclient.Dial(i.Endpoint())
|
||||
if err == nil && client != nil {
|
||||
i.chainID, err = client.ChainID(context.Background())
|
||||
return err == nil && i.chainID != nil
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsRunning returns docker container's running status.
|
||||
func (i *ImgGeth) IsRunning() bool {
|
||||
return i.running
|
||||
}
|
||||
|
||||
// Endpoint return the connection endpoint.
|
||||
func (i *ImgGeth) Endpoint() string {
|
||||
switch true {
|
||||
case i.httpPort != 0:
|
||||
return fmt.Sprintf("http://127.0.0.1:%d", i.httpPort)
|
||||
case i.wsPort != 0:
|
||||
return fmt.Sprintf("ws://127.0.0.1:%d", i.wsPort)
|
||||
default:
|
||||
return i.ipcPath
|
||||
}
|
||||
}
|
||||
|
||||
// ChainID return chainID.
|
||||
func (i *ImgGeth) ChainID() *big.Int {
|
||||
return i.chainID
|
||||
}
|
||||
|
||||
func (i *ImgGeth) isOk() bool {
|
||||
keyword := "WebSocket enabled"
|
||||
okCh := make(chan struct{}, 1)
|
||||
i.cmd.RegistFunc(keyword, func(buf string) {
|
||||
if strings.Contains(buf, keyword) {
|
||||
select {
|
||||
case okCh <- struct{}{}:
|
||||
default:
|
||||
return
|
||||
}
|
||||
}
|
||||
})
|
||||
defer i.cmd.UnRegistFunc(keyword)
|
||||
// Start cmd in parallel.
|
||||
i.cmd.RunCmd(true)
|
||||
|
||||
select {
|
||||
case <-okCh:
|
||||
utils.TryTimes(20, func() bool {
|
||||
i.id = GetContainerID(i.name)
|
||||
return i.id != ""
|
||||
})
|
||||
case err := <-i.cmd.ErrChan:
|
||||
if err != nil {
|
||||
fmt.Printf("failed to start %s, err: %v\n", i.name, err)
|
||||
}
|
||||
case <-time.After(time.Second * 10):
|
||||
return false
|
||||
}
|
||||
return i.id != ""
|
||||
}
|
||||
|
||||
// Stop the docker container.
|
||||
func (i *ImgGeth) Stop() error {
|
||||
if !i.running {
|
||||
return nil
|
||||
}
|
||||
i.running = false
|
||||
|
||||
ctx := context.Background()
|
||||
// check if container is running, stop the running container.
|
||||
id := GetContainerID(i.name)
|
||||
if id != "" {
|
||||
timeoutSec := 3
|
||||
timeout := container.StopOptions{
|
||||
Timeout: &timeoutSec,
|
||||
}
|
||||
if err := cli.ContainerStop(ctx, id, timeout); err != nil {
|
||||
return err
|
||||
}
|
||||
i.id = id
|
||||
}
|
||||
// remove the stopped container.
|
||||
return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
|
||||
}
|
||||
|
||||
func (i *ImgGeth) params() []string {
|
||||
cmds := []string{"run", "--rm", "--name", i.name}
|
||||
var ports []string
|
||||
if i.httpPort != 0 {
|
||||
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)
|
||||
}
|
||||
if i.wsPort != 0 {
|
||||
ports = append(ports, []string{"-p", strconv.Itoa(i.wsPort) + ":8546"}...)
|
||||
}
|
||||
|
||||
var envs []string
|
||||
if i.ipcPath != "" {
|
||||
envs = append(envs, []string{"-e", fmt.Sprintf("IPC_PATH=%s", i.ipcPath)}...)
|
||||
}
|
||||
|
||||
if i.volume != "" {
|
||||
cmds = append(cmds, []string{"-v", fmt.Sprintf("%s:%s", i.volume, i.volume)}...)
|
||||
}
|
||||
|
||||
cmds = append(cmds, ports...)
|
||||
cmds = append(cmds, envs...)
|
||||
|
||||
return append(cmds, i.image)
|
||||
}
|
||||
@@ -1,53 +0,0 @@
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/docker"
|
||||
)
|
||||
|
||||
var (
|
||||
base *docker.App
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
base = docker.NewDockerApp()
|
||||
|
||||
m.Run()
|
||||
|
||||
base.Free()
|
||||
}
|
||||
|
||||
func TestDB(t *testing.T) {
|
||||
base.RunDBImage(t)
|
||||
|
||||
db, err := sqlx.Open("postgres", base.DBImg.Endpoint())
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, db.Ping())
|
||||
}
|
||||
|
||||
func TestL1Geth(t *testing.T) {
|
||||
base.RunL1Geth(t)
|
||||
|
||||
client, err := base.L1Client()
|
||||
assert.NoError(t, err)
|
||||
|
||||
chainID, err := client.ChainID(context.Background())
|
||||
assert.NoError(t, err)
|
||||
t.Logf("chainId: %s", chainID.String())
|
||||
}
|
||||
|
||||
func TestL2Geth(t *testing.T) {
|
||||
base.RunL2Geth(t)
|
||||
|
||||
client, err := base.L2Client()
|
||||
assert.NoError(t, err)
|
||||
|
||||
chainID, err := client.ChainID(context.Background())
|
||||
assert.NoError(t, err)
|
||||
t.Logf("chainId: %s", chainID.String())
|
||||
}
|
||||
@@ -9,7 +9,6 @@ require (
	github.com/docker/docker v25.0.3+incompatible
	github.com/gin-contrib/pprof v1.4.0
	github.com/gin-gonic/gin v1.9.1
	github.com/jmoiron/sqlx v1.3.5
	github.com/mattn/go-colorable v0.1.13
	github.com/mattn/go-isatty v0.0.20
	github.com/modern-go/reflect2 v1.0.2

@@ -144,7 +143,6 @@ require (
	github.com/mailru/easyjson v0.7.6 // indirect
	github.com/mattn/go-runewidth v0.0.15 // indirect
	github.com/mattn/go-shellwords v1.0.12 // indirect
	github.com/mattn/go-sqlite3 v1.14.16 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
	github.com/miekg/pkcs11 v1.1.1 // indirect
@@ -268,7 +268,6 @@ github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXS
|
||||
github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24=
|
||||
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
|
||||
github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
|
||||
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
@@ -400,8 +399,6 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
|
||||
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
|
||||
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
|
||||
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
@@ -443,7 +440,6 @@ github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ic
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
|
||||
@@ -469,9 +465,6 @@ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
|
||||
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
|
||||
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||
github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
|
||||
@@ -10,7 +10,9 @@ import (
|
||||
"scroll-tech/common/types/encoding"
|
||||
"scroll-tech/common/types/encoding/codecv0"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -528,6 +530,76 @@ func TestCodecV1BatchChallenge(t *testing.T) {
|
||||
assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c43", hex.EncodeToString(batch.z[:]))
|
||||
}
|
||||
|
||||
func repeat(element byte, count int) string {
|
||||
result := make([]byte, 0, count)
|
||||
for i := 0; i < count; i++ {
|
||||
result = append(result, element)
|
||||
}
|
||||
return "0x" + common.Bytes2Hex(result)
|
||||
}
|
||||
|
||||
func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
|
||||
nRowsData := 126914
|
||||
|
||||
for _, tc := range []struct {
|
||||
chunks [][]string
|
||||
expectedz string
|
||||
expectedy string
|
||||
}{
|
||||
// single empty chunk
|
||||
{chunks: [][]string{{}}, expectedz: "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", expectedy: "28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df51"},
|
||||
// single non-empty chunk
|
||||
{chunks: [][]string{{"0x010203"}}, expectedz: "30a9d6cfc2b87fb00d80e7fea28ebb9eff0bd526dbf1da32acfe8c5fd49632ff", expectedy: "723515444cb320fe437b9cea3b51293f5fbcb5913739ad35eab28b1863f7c312"},
|
||||
// multiple empty chunks
|
||||
{chunks: [][]string{{}, {}}, expectedz: "17772348f946a4e4adfcaf5c1690d078933b6b090ca9a52fab6c7e545b1007ae", expectedy: "05ba9abbc81a1c97f4cdaa683a7e0c731d9dfd88feef8f7b2fcfd79e593662b5"},
|
||||
// multiple non-empty chunks
|
||||
{chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "60376321eea0886c29bd97d95851c7b5fbdb064c8adfdadd7678617b32b3ebf2", expectedy: "50cfbcece01cadb4eade40649e17b140b31f96088097e38f020e31dfe6551604"},
|
||||
// empty chunk followed by non-empty chunk
|
||||
{chunks: [][]string{{}, {"0x010203"}}, expectedz: "054539f03564eda9462d582703cde0788e4e27c311582ddfb19835358273a7ca", expectedy: "1fba03580b5908c4c66b48e79c10e7a34e4b27ed37a1a049b3e17e017cad5245"},
|
||||
// non-empty chunk followed by empty chunk
|
||||
{chunks: [][]string{{"0x070809"}, {}}, expectedz: "0b82dceaa6ca4b5d704590c921accfd991b56b5ad0212e6a4e63e54915a2053b", expectedy: "2362f3a0c87f0ea11eb898ed608c7f09a42926a058d4c5d111a0f54cad10ebbd"},
|
||||
// max number of chunks all empty
|
||||
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "174cd3ba9b2ae8ab789ec0b5b8e0b27ee122256ec1756c383dbf2b5b96903f1b", expectedy: "225cab9658904181671eb7abc342ffc36a6836048b64a67f0fb758439da2567b"},
|
||||
// max number of chunks all non-empty
|
||||
{chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1e93e961cdfb4bd26a5be48f23af4f1aa8c6bebe57a089d3250f8afb1e988bf8", expectedy: "24ed4791a70b28a6bad21c22d58f82a5ea5f9f9d2bcfc07428b494e9ae93de6e"},
|
||||
// single chunk blob full
|
||||
{chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "61405cb0b114dfb4d611be84bedba0fcd2e55615e193e424f1cc7b1af0df3d31", expectedy: "58609bbca10e50489b630ecb5b9347378579ed784d6a10749fd505055d35c3c0"},
|
||||
// multiple chunks blob full
|
||||
{chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "22533c3ea99536b4b83a89835aa91e6f0d2fc3866c201e18d7ca4b3af92fad61", expectedy: "40d4b71492e1a06ee3c273ef9003c7cb05aed021208871e13fa33302fa0f4dcc"},
|
||||
// max number of chunks only last one non-empty not full blob
|
||||
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "0e6525c0dd261e8f62342b1139062bb23bc2b8b460163364598fb29e82a4eed5", expectedy: "1db984d6deb5e84bc67d0755aa2da8fe687233147603b4ecba94d0c8463c3836"},
|
||||
// max number of chunks only last one non-empty full blob
|
||||
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "3a638eac98f22f817b84e3d81ccaa3de080f83dc80a5823a3f19320ef3cb6fc8", expectedy: "73ab100278822144e2ed8c9d986e92f7a2662fd18a51bdf96ec55848578b227a"},
|
||||
// max number of chunks but last is empty
|
||||
{chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "02ef442d99f450559647a7823f1be0e148c75481cc5c703c02a116e8ac531fa8", expectedy: "31743538cfc3ac43d1378a5c497ebc9462c20b4cb4470e0e7a9f7342ea948333"},
|
||||
} {
|
||||
chunks := []*encoding.Chunk{}
|
||||
|
||||
for _, c := range tc.chunks {
|
||||
block := &encoding.Block{Transactions: []*types.TransactionData{}}
|
||||
|
||||
for _, data := range c {
|
||||
tx := &types.TransactionData{Type: 0xff, Data: data}
|
||||
block.Transactions = append(block.Transactions, tx)
|
||||
}
|
||||
|
||||
chunk := &encoding.Chunk{Blocks: []*encoding.Block{block}}
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
b, z, err := constructBlobPayload(chunks)
|
||||
assert.NoError(t, err)
|
||||
actualZ := hex.EncodeToString(z[:])
|
||||
assert.Equal(t, tc.expectedz, actualZ)
|
||||
|
||||
_, y, err := kzg4844.ComputeProof(*b, *z)
|
||||
assert.NoError(t, err)
|
||||
actualY := hex.EncodeToString(y[:])
|
||||
assert.Equal(t, tc.expectedy, actualY)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestCodecV1BatchBlobDataProof(t *testing.T) {
|
||||
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
|
||||
|
||||
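The `repeat` helper in the codecv1 test changes above simply hex-encodes `count` copies of a single byte, which the table-driven cases use to build near-full blob payloads. Here is a standalone copy for quick experimentation; the helper body is taken from the diff, the `main` wrapper is only for illustration, and the upstream go-ethereum `common` package stands in for the scroll-tech fork.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// repeat returns "0x" followed by count copies of element, hex-encoded,
// matching the test helper added in codecv1_test.go above.
func repeat(element byte, count int) string {
	result := make([]byte, 0, count)
	for i := 0; i < count; i++ {
		result = append(result, element)
	}
	return "0x" + common.Bytes2Hex(result)
}

func main() {
	fmt.Println(repeat(0x0a, 3))          // 0x0a0a0a
	fmt.Println(len(repeat(123, 126914))) // 2 + 2*126914 characters
}
```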
@@ -6,6 +6,7 @@ import (
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/log"
)

// CodecVersion defines the version of encoder and decoder.

@@ -17,8 +18,18 @@ const (

	// CodecV1 represents the version 1 of the encoder and decoder.
	CodecV1

	// txTypeTest is a special transaction type used in unit tests.
	txTypeTest = 0xff
)

func init() {
	// make sure txTypeTest will not interfere with other transaction types
	if txTypeTest == types.LegacyTxType || txTypeTest == types.AccessListTxType || txTypeTest == types.DynamicFeeTxType || txTypeTest == types.BlobTxType || txTypeTest == types.L1MessageTxType {
		log.Crit("txTypeTest is overlapping with existing transaction types")
	}
}

// Block represents an L2 block.
type Block struct {
	Header *types.Header

@@ -134,6 +145,10 @@ func ConvertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
		S: txData.S.ToInt(),
	})

	case txTypeTest:
		// in the tests, we simply use `data` as the RLP-encoded transaction
		return data, nil

	case types.L1MessageTxType: // L1MessageTxType is not supported
	default:
		return nil, fmt.Errorf("unsupported tx type: %d", txData.Type)
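The `init()` added above is a fail-fast guard: if the test-only sentinel ever collides with a real transaction type, the package refuses to start. A self-contained sketch of the same idea follows; the constant values are stand-ins using the standard EIP transaction type numbers (the real constants live in the go-ethereum `core/types` package, and the actual check also covers the scroll fork's `L1MessageTxType`).

```go
package main

import "log"

// Illustrative stand-ins for the transaction type constants; values follow
// the standard EIP tx types, not a transcription of core/types.
const (
	LegacyTxType     = 0x00
	AccessListTxType = 0x01
	DynamicFeeTxType = 0x02
	BlobTxType       = 0x03

	// txTypeTest is the test-only sentinel added in the diff above.
	txTypeTest = 0xff
)

func init() {
	// Same idea as the init() added to the encoding package: fail fast if the
	// sentinel ever overlaps with a real transaction type.
	if txTypeTest == LegacyTxType || txTypeTest == AccessListTxType ||
		txTypeTest == DynamicFeeTxType || txTypeTest == BlobTxType {
		log.Fatal("txTypeTest overlaps with an existing transaction type")
	}
}

func main() {
	log.Printf("txTypeTest = %#x", txTypeTest)
}
```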
@@ -5,7 +5,7 @@ import (
	"runtime/debug"
)

var tag = "v4.3.84"
var tag = "v4.3.85"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
@@ -37,7 +37,8 @@ contract DeployL1BridgeContracts is Script {
    address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
    address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");

    address L1_PLONK_VERIFIER_ADDR = vm.envAddress("L1_PLONK_VERIFIER_ADDR");
    address L1_PLONK_VERIFIER_0_ADDR = vm.envAddress("L1_PLONK_VERIFIER_0_ADDR");
    address L1_PLONK_VERIFIER_1_ADDR = vm.envAddress("L1_PLONK_VERIFIER_1_ADDR");

    address L1_PROXY_ADMIN_ADDR = vm.envAddress("L1_PROXY_ADMIN_ADDR");

@@ -55,7 +56,9 @@ contract DeployL1BridgeContracts is Script {
    address L2_SCROLL_STANDARD_ERC20_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_ADDR");
    address L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR = vm.envAddress("L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR");

    ZkEvmVerifierV1 zkEvmVerifierV1;
    // TODO: refactor ZkEvmVerifierV1 into an array?
    ZkEvmVerifierV1 zkEvmVerifierV1_0;
    ZkEvmVerifierV1 zkEvmVerifierV1_1;
    MultipleVersionRollupVerifier rollupVerifier;
    EnforcedTxGateway enforcedTxGateway;
    ProxyAdmin proxyAdmin;

@@ -66,7 +69,7 @@ contract DeployL1BridgeContracts is Script {

        vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);

        deployZkEvmVerifierV1();
        deployZkEvmVerifierV1s();
        deployMultipleVersionRollupVerifier();
        deployL1Whitelist();
        deployEnforcedTxGateway();

@@ -85,17 +88,23 @@ contract DeployL1BridgeContracts is Script {
        vm.stopBroadcast();
    }

    function deployZkEvmVerifierV1() internal {
        zkEvmVerifierV1 = new ZkEvmVerifierV1(L1_PLONK_VERIFIER_ADDR);
    // TODO: refactor
    function deployZkEvmVerifierV1s() internal {
        zkEvmVerifierV1_0 = new ZkEvmVerifierV1(L1_PLONK_VERIFIER_0_ADDR);
        zkEvmVerifierV1_1 = new ZkEvmVerifierV1(L1_PLONK_VERIFIER_1_ADDR);

        logAddress("L1_ZKEVM_VERIFIER_V1_ADDR", address(zkEvmVerifierV1));
        logAddress("L1_ZKEVM_VERIFIER_V1_0_ADDR", address(zkEvmVerifierV1_0));
        logAddress("L1_ZKEVM_VERIFIER_V1_1_ADDR", address(zkEvmVerifierV1_1));
    }

    // TODO: refactor
    function deployMultipleVersionRollupVerifier() internal {
        uint256[] memory _versions = new uint256[](1);
        address[] memory _verifiers = new address[](1);
        uint256[] memory _versions = new uint256[](2);
        address[] memory _verifiers = new address[](2);
        _versions[0] = 0;
        _verifiers[0] = address(zkEvmVerifierV1);
        _verifiers[0] = address(zkEvmVerifierV1_0);
        _versions[1] = 1;
        _verifiers[1] = address(zkEvmVerifierV1_1);
        rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);

        logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
@@ -2,6 +2,8 @@

pragma solidity ^0.8.24;

/// @title IScrollChain
/// @notice The interface for ScrollChain.
interface IScrollChain {
    /**********
     * Events *

@@ -43,23 +45,23 @@ interface IScrollChain {
     * Public View Functions *
     *************************/

    /// @notice The latest finalized batch index.
    /// @return The latest finalized batch index.
    function lastFinalizedBatchIndex() external view returns (uint256);

    /// @notice Return the batch hash of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The batch hash of a committed batch.
    function committedBatches(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return the state root of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The state root of a committed batch.
    function finalizedStateRoots(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return the message root of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The message root of a committed batch.
    function withdrawRoots(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return whether the batch is finalized by batch index.
    /// @param batchIndex The index of the batch.
    /// @return Whether the batch is finalized by batch index.
    function isBatchFinalized(uint256 batchIndex) external view returns (bool);

    /*****************************
@@ -8,6 +8,8 @@ import {IScrollChain} from "./IScrollChain.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";

/// @title MultipleVersionRollupVerifier
/// @notice Verifies aggregate zk proofs using the appropriate verifier.
contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /**********
     * Events *

@@ -37,7 +39,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     *************/

    /// @notice The address of ScrollChain contract.
    address immutable scrollChain;
    address public immutable scrollChain;

    /***********
     * Structs *

@@ -58,7 +60,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /// The verifiers are sorted by batchIndex in increasing order.
    mapping(uint256 => Verifier[]) public legacyVerifiers;

    /// @notice Mapping from verifier version to the lastest used zkevm verifier.
    /// @notice Mapping from verifier version to the latest used zkevm verifier.
    mapping(uint256 => Verifier) public latestVerifier;

    /***************

@@ -86,6 +88,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     *************************/

    /// @notice Return the number of legacy verifiers.
    /// @param _version The version of legacy verifiers.
    /// @return The number of legacy verifiers.
    function legacyVerifiersLength(uint256 _version) external view returns (uint256) {
        return legacyVerifiers[_version].length;
    }

@@ -93,6 +97,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /// @notice Compute the verifier should be used for specific batch.
    /// @param _version The version of verifier to query.
    /// @param _batchIndex The batch index to query.
    /// @return The address of verifier.
    function getVerifier(uint256 _version, uint256 _batchIndex) public view returns (address) {
        // Normally, we will use the latest verifier.
        Verifier memory _verifier = latestVerifier[_version];

@@ -144,6 +149,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     ************************/

    /// @notice Update the address of zkevm verifier.
    /// @param _version The version of the verifier.
    /// @param _startBatchIndex The start batch index when the verifier will be used.
    /// @param _verifier The address of new verifier.
    function updateVerifier(
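To make the verifier-selection scheme above concrete, here is a rough Go model of how a per-version latest verifier plus a batch-index cutoff could resolve to an address. The diff only excerpts `getVerifier`, so the fallback-to-legacy-verifiers step is an assumption drawn from the comments ("verifiers are sorted by batchIndex in increasing order"), not a transcription of the contract.

```go
package main

import "fmt"

// Verifier mirrors the contract's struct in spirit: the batch index from which
// this verifier becomes active, and its address (a string here for simplicity).
type Verifier struct {
	StartBatchIndex uint64
	Addr            string
}

// getVerifier is a simplified model: use the latest verifier for the version
// unless the batch predates it, in which case walk the legacy list (sorted by
// StartBatchIndex ascending) and take the last entry that already applies.
func getVerifier(latest map[uint64]Verifier, legacy map[uint64][]Verifier, version, batchIndex uint64) string {
	v := latest[version]
	if batchIndex >= v.StartBatchIndex {
		return v.Addr
	}
	addr := ""
	for _, lv := range legacy[version] {
		if lv.StartBatchIndex <= batchIndex {
			addr = lv.Addr
		}
	}
	return addr
}

func main() {
	latest := map[uint64]Verifier{1: {StartBatchIndex: 100, Addr: "verifier-v1-new"}}
	legacy := map[uint64][]Verifier{1: {{StartBatchIndex: 0, Addr: "verifier-v1-old"}}}
	fmt.Println(getVerifier(latest, legacy, 1, 5))   // verifier-v1-old
	fmt.Println(getVerifier(latest, legacy, 1, 200)) // verifier-v1-new
}
```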
@@ -115,11 +115,11 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     *************/

    /// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
    address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
    address private constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);

    /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
    /// point evaluation precompile
    uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
    uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;

    /// @notice The chain id of the corresponding layer 2 chain.
    uint64 public immutable layer2ChainId;

@@ -236,6 +236,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     *****************************/

    /// @notice Import layer 2 genesis block
    /// @param _batchHeader The header of the genesis batch.
    /// @param _stateRoot The state root of the genesis block.
    function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
        // check genesis batch header length
        if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();
@@ -2,6 +2,8 @@

pragma solidity ^0.8.24;

/// @title IRollupVerifier
/// @notice The interface for rollup verifier.
interface IRollupVerifier {
    /// @notice Verify aggregate zk proof.
    /// @param batchIndex The batch index to verify.
@@ -13,7 +13,6 @@ import (
	"github.com/scroll-tech/go-ethereum/params"

	"scroll-tech/common/cmd"
	"scroll-tech/common/docker"
	"scroll-tech/common/testcontainers"
	"scroll-tech/common/utils"

@@ -38,7 +37,7 @@ type CoordinatorApp struct {
	HTTPPort int64

	args []string
	docker.AppAPI
	*cmd.Cmd
}

// NewCoordinatorApp return a new coordinatorApp manager.

@@ -64,14 +63,14 @@ func NewCoordinatorApp(testApps *testcontainers.TestcontainerApps, configFile st

// RunApp run coordinator-test child process by multi parameters.
func (c *CoordinatorApp) RunApp(t *testing.T, args ...string) {
	c.AppAPI = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
	c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
	c.Cmd = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
	c.Cmd.RunApp(func() bool { return c.Cmd.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
}

// Free stop and release coordinator-test.
func (c *CoordinatorApp) Free() {
	if !utils.IsNil(c.AppAPI) {
		c.AppAPI.WaitExit()
	if !utils.IsNil(c.Cmd) {
		c.Cmd.WaitExit()
	}
	_ = os.Remove(c.coordinatorFile)
}
@@ -14,7 +14,6 @@ import (
	"scroll-tech/common/types/message"

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/logic/verifier"
)

var (

@@ -34,7 +33,7 @@ func TestFFI(t *testing.T) {
		AssetsPath: *assetsPath,
	}

	v, err := verifier.NewVerifier(cfg)
	v, err := NewVerifier(cfg)
	as.NoError(err)

	chunkProof1 := readChunkProof(*chunkProofPath1, as)
@@ -22,7 +22,6 @@ import (
	"scroll-tech/database/migrate"

	"scroll-tech/common/testcontainers"
	tc "scroll-tech/common/testcontainers"
	"scroll-tech/common/types"
	"scroll-tech/common/types/encoding"
	"scroll-tech/common/types/message"

@@ -156,7 +155,7 @@ func setEnv(t *testing.T) {
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	testApps = tc.NewTestcontainerApps()
	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())

	db, err = testApps.GetGormDBClient()
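Several of the test hunks below follow the same migration pattern: build a `TestcontainerApps` handle, start only the containers the test needs, and free everything when the suite exits. A hedged sketch of that shape follows, composed from the method names visible in the diffs (`NewTestcontainerApps`, `StartPostgresContainer`, `GetGormDBClient`, `Free`); the package path and exact signatures are taken on trust from these excerpts.

```go
package mypackage_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"gorm.io/gorm"

	"scroll-tech/common/testcontainers"
)

var (
	testApps *testcontainers.TestcontainerApps
	db       *gorm.DB
)

// TestMain tears the shared containers down after every test in the
// package has run, mirroring the updated test files in this compare.
func TestMain(m *testing.M) {
	defer func() {
		if testApps != nil {
			testApps.Free()
		}
	}()
	m.Run()
}

// setupEnv starts a Postgres container and opens a gorm handle against it.
func setupEnv(t *testing.T) {
	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())

	var err error
	db, err = testApps.GetGormDBClient()
	assert.NoError(t, err)
}
```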
@@ -1,93 +1,89 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
_ "github.com/lib/pq"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/docker"
|
||||
|
||||
"scroll-tech/database"
|
||||
"scroll-tech/common/testcontainers"
|
||||
)
|
||||
|
||||
var (
|
||||
base *docker.App
|
||||
pgDB *sqlx.DB
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
pgDB *sql.DB
|
||||
)
|
||||
|
||||
func initEnv(t *testing.T) error {
|
||||
func setupEnv(t *testing.T) {
|
||||
// Start db container.
|
||||
base.RunDBImage(t)
|
||||
testApps = testcontainers.NewTestcontainerApps()
|
||||
assert.NoError(t, testApps.StartPostgresContainer())
|
||||
gormClient, err := testApps.GetGormDBClient()
|
||||
assert.NoError(t, err)
|
||||
pgDB, err = gormClient.DB()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
// Create db orm handler.
|
||||
factory, err := database.NewOrmFactory(base.DBConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pgDB = factory.GetDB()
|
||||
return nil
|
||||
func TestMain(m *testing.M) {
|
||||
defer func() {
|
||||
if testApps != nil {
|
||||
testApps.Free()
|
||||
}
|
||||
}()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
func TestMigrate(t *testing.T) {
|
||||
base = docker.NewDockerApp()
|
||||
if err := initEnv(t); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
setupEnv(t)
|
||||
t.Run("testCurrent", testCurrent)
|
||||
t.Run("testStatus", testStatus)
|
||||
t.Run("testResetDB", testResetDB)
|
||||
t.Run("testMigrate", testMigrate)
|
||||
t.Run("testRollback", testRollback)
|
||||
|
||||
t.Cleanup(func() {
|
||||
base.Free()
|
||||
})
|
||||
}
|
||||
|
||||
func testCurrent(t *testing.T) {
|
||||
cur, err := Current(pgDB.DB)
|
||||
cur, err := Current(pgDB)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(0), cur)
|
||||
}
|
||||
|
||||
func testStatus(t *testing.T) {
|
||||
status := Status(pgDB.DB)
|
||||
status := Status(pgDB)
|
||||
assert.NoError(t, status)
|
||||
}
|
||||
|
||||
func testResetDB(t *testing.T) {
|
||||
assert.NoError(t, ResetDB(pgDB.DB))
|
||||
cur, err := Current(pgDB.DB)
|
||||
assert.NoError(t, ResetDB(pgDB))
|
||||
cur, err := Current(pgDB)
|
||||
assert.NoError(t, err)
|
||||
// total number of tables.
|
||||
assert.Equal(t, int64(16), cur)
|
||||
}
|
||||
|
||||
func testMigrate(t *testing.T) {
|
||||
assert.NoError(t, Migrate(pgDB.DB))
|
||||
cur, err := Current(pgDB.DB)
|
||||
assert.NoError(t, Migrate(pgDB))
|
||||
cur, err := Current(pgDB)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(16), cur)
|
||||
}
|
||||
|
||||
func testRollback(t *testing.T) {
|
||||
version, err := Current(pgDB.DB)
|
||||
version, err := Current(pgDB)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(16), version)
|
||||
|
||||
assert.NoError(t, Rollback(pgDB.DB, nil))
|
||||
assert.NoError(t, Rollback(pgDB, nil))
|
||||
|
||||
cur, err := Current(pgDB.DB)
|
||||
cur, err := Current(pgDB)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, version, cur+1)
|
||||
|
||||
targetVersion := int64(0)
|
||||
assert.NoError(t, Rollback(pgDB.DB, &targetVersion))
|
||||
assert.NoError(t, Rollback(pgDB, &targetVersion))
|
||||
|
||||
cur, err = Current(pgDB.DB)
|
||||
cur, err = Current(pgDB)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(0), cur)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"scroll-tech/prover/config"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
@@ -40,7 +39,7 @@ type ProverApp struct {
|
||||
index int
|
||||
name string
|
||||
args []string
|
||||
docker.AppAPI
|
||||
*cmd.Cmd
|
||||
}
|
||||
|
||||
// NewProverApp return a new proverApp manager.
|
||||
@@ -65,7 +64,7 @@ func NewProverApp(testApps *testcontainers.TestcontainerApps, mockName utils.Moc
|
||||
name: name,
|
||||
args: []string{"--log.debug", "--config", proverFile},
|
||||
}
|
||||
proverApp.AppAPI = cmd.NewCmd(proverApp.name, proverApp.args...)
|
||||
proverApp.Cmd = cmd.NewCmd(proverApp.name, proverApp.args...)
|
||||
if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -74,13 +73,13 @@ func NewProverApp(testApps *testcontainers.TestcontainerApps, mockName utils.Moc
|
||||
|
||||
// RunApp run prover-test child process by multi parameters.
|
||||
func (r *ProverApp) RunApp(t *testing.T) {
|
||||
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, "prover start successfully") })
|
||||
r.Cmd.RunApp(func() bool { return r.Cmd.WaitResult(t, time.Second*40, "prover start successfully") })
|
||||
}
|
||||
|
||||
// Free stop and release prover-test.
|
||||
func (r *ProverApp) Free() {
|
||||
if !utils.IsNil(r.AppAPI) {
|
||||
r.AppAPI.WaitExit()
|
||||
if !utils.IsNil(r.Cmd) {
|
||||
r.Cmd.WaitExit()
|
||||
}
|
||||
_ = os.Remove(r.proverFile)
|
||||
_ = os.Remove(r.Config.KeystorePath)
|
||||
|
||||
@@ -7,12 +7,11 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
)
|
||||
|
||||
// MockApp mockApp-test client manager.
|
||||
@@ -20,7 +19,7 @@ type MockApp struct {
|
||||
Config *config.Config
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
|
||||
mockApps map[utils.MockAppName]docker.AppAPI
|
||||
mockApps map[utils.MockAppName]*cmd.Cmd
|
||||
|
||||
originFile string
|
||||
rollupFile string
|
||||
@@ -33,7 +32,7 @@ func NewRollupApp(testApps *testcontainers.TestcontainerApps, file string) *Mock
|
||||
rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", testApps.Timestamp)
|
||||
rollupApp := &MockApp{
|
||||
testApps: testApps,
|
||||
mockApps: make(map[utils.MockAppName]docker.AppAPI),
|
||||
mockApps: make(map[utils.MockAppName]*cmd.Cmd),
|
||||
originFile: file,
|
||||
rollupFile: rollupFile,
|
||||
args: []string{"--log.debug", "--config", rollupFile},
|
||||
@@ -69,7 +68,7 @@ func (b *MockApp) WaitExit() {
|
||||
for _, app := range b.mockApps {
|
||||
app.WaitExit()
|
||||
}
|
||||
b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
|
||||
b.mockApps = make(map[utils.MockAppName]*cmd.Cmd)
|
||||
}
|
||||
|
||||
// Free stop and release rollup mocked apps.
|
||||
|
||||
@@ -14,8 +14,8 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
dockercompose "scroll-tech/common/docker-compose/l1"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/types/encoding"
|
||||
"scroll-tech/common/types/encoding/codecv0"
|
||||
|
||||
@@ -26,7 +26,7 @@ var (
|
||||
// config
|
||||
cfg *config.Config
|
||||
|
||||
base *docker.App
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
posL1TestEnv *dockercompose.PoSL1TestEnv
|
||||
|
||||
// l2geth client
|
||||
@@ -53,16 +53,25 @@ func setupEnv(t *testing.T) {
|
||||
cfg, err = config.NewConfig("../../../conf/config.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
base.RunL2Geth(t)
|
||||
base.RunDBImage(t)
|
||||
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
|
||||
assert.NoError(t, err, "failed to create PoS L1 test environment")
|
||||
assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")
|
||||
|
||||
testApps = testcontainers.NewTestcontainerApps()
|
||||
assert.NoError(t, testApps.StartPostgresContainer())
|
||||
assert.NoError(t, testApps.StartL2GethContainer())
|
||||
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
|
||||
assert.NoError(t, err)
|
||||
|
||||
dsn, err := testApps.GetDBEndPoint()
|
||||
assert.NoError(t, err)
|
||||
cfg.DBConfig = &database.Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
DSN: dsn,
|
||||
DriverName: "postgres",
|
||||
MaxOpenNum: 200,
|
||||
MaxIdleNum: 20,
|
||||
}
|
||||
port, err := rand.Int(rand.Reader, big.NewInt(10000))
|
||||
assert.NoError(t, err)
|
||||
@@ -70,7 +79,7 @@ func setupEnv(t *testing.T) {
|
||||
cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort
|
||||
|
||||
// Create l2geth client.
|
||||
l2Cli, err = base.L2Client()
|
||||
l2Cli, err = testApps.GetL2GethClient()
|
||||
assert.NoError(t, err)
|
||||
|
||||
templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
|
||||
@@ -97,19 +106,14 @@ func setupEnv(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
base = docker.NewDockerApp()
|
||||
base.Free()
|
||||
|
||||
var err error
|
||||
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
|
||||
if err != nil {
|
||||
log.Crit("failed to create PoS L1 test environment", "err", err)
|
||||
}
|
||||
if err := posL1TestEnv.Start(); err != nil {
|
||||
log.Crit("failed to start PoS L1 test environment", "err", err)
|
||||
}
|
||||
defer posL1TestEnv.Stop()
|
||||
|
||||
defer func() {
|
||||
if testApps != nil {
|
||||
testApps.Free()
|
||||
}
|
||||
if posL1TestEnv != nil {
|
||||
posL1TestEnv.Stop()
|
||||
}
|
||||
}()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
|
||||
@@ -26,12 +26,10 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
dockercompose "scroll-tech/common/docker-compose/l1"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
bridgeAbi "scroll-tech/rollup/abi"
|
||||
"scroll-tech/rollup/internal/config"
|
||||
@@ -39,12 +37,10 @@ import (
|
||||
"scroll-tech/rollup/mock_bridge"
|
||||
)
|
||||
|
||||
const TXBatch = 50
|
||||
|
||||
var (
|
||||
privateKey *ecdsa.PrivateKey
|
||||
cfg *config.Config
|
||||
base *docker.App
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
posL1TestEnv *dockercompose.PoSL1TestEnv
|
||||
txTypes = []string{"LegacyTx", "DynamicFeeTx", "DynamicFeeTx"}
|
||||
txBlob = []*kzg4844.Blob{nil, nil, randBlob()}
|
||||
@@ -54,19 +50,14 @@ var (
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
base = docker.NewDockerApp()
|
||||
defer base.Free()
|
||||
|
||||
var err error
|
||||
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
|
||||
if err != nil {
|
||||
log.Crit("failed to create PoS L1 test environment", "err", err)
|
||||
}
|
||||
if err := posL1TestEnv.Start(); err != nil {
|
||||
log.Crit("failed to start PoS L1 test environment", "err", err)
|
||||
}
|
||||
defer posL1TestEnv.Stop()
|
||||
|
||||
defer func() {
|
||||
if testApps != nil {
|
||||
testApps.Free()
|
||||
}
|
||||
if posL1TestEnv != nil {
|
||||
posL1TestEnv.Stop()
|
||||
}
|
||||
}()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
@@ -82,17 +73,18 @@ func setupEnv(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
privateKey = priv
|
||||
|
||||
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
|
||||
assert.NoError(t, err, "failed to create PoS L1 test environment")
|
||||
assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")
|
||||
|
||||
testApps = testcontainers.NewTestcontainerApps()
|
||||
assert.NoError(t, testApps.StartPostgresContainer())
|
||||
assert.NoError(t, testApps.StartL1GethContainer())
|
||||
assert.NoError(t, testApps.StartL2GethContainer())
|
||||
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
|
||||
|
||||
base.RunDBImage(t)
|
||||
db, err = database.InitDB(
|
||||
&database.Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
},
|
||||
)
|
||||
db, err = testApps.GetGormDBClient()
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -27,7 +27,7 @@ import (

func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
	db := setupDB(t)
	client, err := ethclient.Dial(base.L1gethImg.Endpoint())
	client, err := testApps.GetL1GethClient()
	assert.NoError(t, err)
	l1Cfg := cfg.L1Config
	watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)
@@ -11,9 +11,8 @@ import (
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/types/encoding"
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
@@ -23,7 +22,7 @@ var (
|
||||
// config
|
||||
cfg *config.Config
|
||||
|
||||
base *docker.App
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
|
||||
// l2geth client
|
||||
l2Cli *ethclient.Client
|
||||
@@ -42,19 +41,27 @@ func setupEnv(t *testing.T) (err error) {
|
||||
cfg, err = config.NewConfig("../../../conf/config.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
base.RunImages(t)
|
||||
testApps = testcontainers.NewTestcontainerApps()
|
||||
assert.NoError(t, testApps.StartPostgresContainer())
|
||||
assert.NoError(t, testApps.StartL1GethContainer())
|
||||
assert.NoError(t, testApps.StartL2GethContainer())
|
||||
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL1GethEndPoint()
|
||||
assert.NoError(t, err)
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
|
||||
assert.NoError(t, err)
|
||||
|
||||
dsn, err := testApps.GetDBEndPoint()
|
||||
assert.NoError(t, err)
|
||||
cfg.DBConfig = &database.Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
DSN: dsn,
|
||||
DriverName: "postgres",
|
||||
MaxOpenNum: 200,
|
||||
MaxIdleNum: 20,
|
||||
}
|
||||
|
||||
// Create l2geth client.
|
||||
l2Cli, err = base.L2Client()
|
||||
l2Cli, err = testApps.GetL2GethClient()
|
||||
assert.NoError(t, err)
|
||||
|
||||
block1 = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
@@ -73,11 +80,12 @@ func setupDB(t *testing.T) *gorm.DB {
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
base = docker.NewDockerApp()
|
||||
|
||||
defer func() {
|
||||
if testApps != nil {
|
||||
testApps.Free()
|
||||
}
|
||||
}()
|
||||
m.Run()
|
||||
|
||||
base.Free()
|
||||
}
|
||||
|
||||
func TestFunction(t *testing.T) {
|
||||
|
||||
@@ -14,8 +14,7 @@ import (
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/encoding"
|
||||
"scroll-tech/common/types/encoding/codecv0"
|
||||
@@ -23,7 +22,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
base *docker.App
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
|
||||
db *gorm.DB
|
||||
l2BlockOrm *L2Block
|
||||
@@ -37,23 +36,23 @@ var (
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
t := &testing.T{}
|
||||
defer func() {
|
||||
if testApps != nil {
|
||||
testApps.Free()
|
||||
}
|
||||
tearDownEnv(t)
|
||||
}()
|
||||
setupEnv(t)
|
||||
defer tearDownEnv(t)
|
||||
m.Run()
|
||||
}
|
||||
|
||||
func setupEnv(t *testing.T) {
|
||||
base = docker.NewDockerApp()
|
||||
base.RunDBImage(t)
|
||||
var err error
|
||||
db, err = database.InitDB(
|
||||
&database.Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
},
|
||||
)
|
||||
|
||||
testApps = testcontainers.NewTestcontainerApps()
|
||||
assert.NoError(t, testApps.StartPostgresContainer())
|
||||
|
||||
db, err = testApps.GetGormDBClient()
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
@@ -81,7 +80,6 @@ func tearDownEnv(t *testing.T) {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
sqlDB.Close()
|
||||
base.Free()
|
||||
}
|
||||
|
||||
func TestL1BlockOrm(t *testing.T) {