Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 07:28:08 -05:00)

Compare commits: v4.3.76 ... test-v1.22 (32 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | d7a57235d3 |  |
|  | 91d21301ec |  |
|  | 4b32a44a70 |  |
|  | 55b400c5fb |  |
|  | 1b49091207 |  |
|  | 5b827c3c18 |  |
|  | 6b2eb80aa5 |  |
|  | 71f88b04f5 |  |
|  | bcd9764bcd |  |
|  | b4f8377a08 |  |
|  | b52d43caa8 |  |
|  | 201bf401cd |  |
|  | 898ac1d25c |  |
|  | 1336b89fb8 |  |
|  | 73045df037 |  |
|  | b3093e9eb6 |  |
|  | 3d5250e52d |  |
|  | b7324c76bc |  |
|  | 6d6e98bd6e |  |
|  | 9e35ce0ab4 |  |
|  | b86ebaefaf |  |
|  | 78a4298eda |  |
|  | 49d8387714 |  |
|  | af2913903b |  |
|  | f8a7d70872 |  |
|  | 790fc44b40 |  |
|  | 620c71b16d |  |
|  | ed0e0e4c18 |  |
|  | d203033e13 |  |
|  | 7d45926687 |  |
|  | 5362e28f74 |  |
|  | e8eb7ff8fd |  |
.github/workflows/docker.yml (vendored, 1 change)

@@ -39,6 +39,7 @@ jobs:
           aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
       - name: Build and push
         uses: docker/build-push-action@v3
+        platforms: linux/amd64,linux/arm64
         env:
           ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
           REPOSITORY: event-watcher
@@ -8,7 +8,7 @@ require (
 	github.com/go-redis/redis/v8 v8.11.5
 	github.com/pressly/goose/v3 v3.16.0
 	github.com/prometheus/client_golang v1.16.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
 	github.com/stretchr/testify v1.9.0
 	github.com/urfave/cli/v2 v2.25.7
 	golang.org/x/sync v0.6.0
@@ -60,6 +60,7 @@ require (
 	github.com/holiman/uint256 v1.2.4 // indirect
 	github.com/huin/goupnp v1.3.0 // indirect
 	github.com/iden3/go-iden3-crypto v0.0.15 // indirect
+	github.com/jackc/pgx/v5 v5.5.4 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
@@ -184,8 +184,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
 github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
 github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
 github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
-github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
+github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
+github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
 github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
 github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
 github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
@@ -311,8 +311,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935 h1:bHBt6sillaT4o/9RjxkVX8pWwvEmu37uWBw4XbCjfzY=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
 github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
 github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
@@ -23,6 +23,7 @@ type FetcherConfig struct {
 	DAIGatewayAddr     string `json:"DAIGatewayAddr"`
 	USDCGatewayAddr    string `json:"USDCGatewayAddr"`
 	LIDOGatewayAddr    string `json:"LIDOGatewayAddr"`
+	PufferGatewayAddr  string `json:"PufferGatewayAddr"`
 	ERC721GatewayAddr  string `json:"ERC721GatewayAddr"`
 	ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"`
 	ScrollChainAddr    string `json:"ScrollChainAddr"`
@@ -93,6 +93,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
 	}
 
+	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
+	}
+
 	log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
 
 	f := &L1FetcherLogic{
@@ -85,7 +85,12 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient
 	if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) {
 		addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr))
-		gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr))
 	}
 
+	if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) {
+		addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr))
+		gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr))
+	}
+
 	log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList)
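The L2 hunk above, besides registering the Puffer gateway, fixes a copy-paste bug: the LIDO block previously pushed the USDC gateway address into `gatewayList`. Both fetchers rely on the same guard: `common.HexToAddress` of an unset config string yields the zero address, so unconfigured gateways are skipped. A minimal sketch of that pattern (the helper and the sample address are illustrative, not code from the repository):

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
)

// registerGateway appends addr to both lists only when it is actually
// configured: HexToAddress("") returns common.Address{} (the zero address),
// so unset JSON fields fall through the guard and are skipped.
func registerGateway(addressList, gatewayList []common.Address, addr string) ([]common.Address, []common.Address) {
	a := common.HexToAddress(addr)
	if a != (common.Address{}) {
		addressList = append(addressList, a)
		gatewayList = append(gatewayList, a)
	}
	return addressList, gatewayList
}

func main() {
	var addrs, gws []common.Address
	addrs, gws = registerGateway(addrs, gws, "0x0000000000000000000000000000000000000001") // configured
	addrs, gws = registerGateway(addrs, gws, "")                                           // unset, skipped
	fmt.Println(len(addrs), len(gws)) // 1 1
}
```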
@@ -1,62 +1,27 @@
-package database
+package database_test
 
 import (
-	"context"
-	"errors"
-	"io"
-	"os"
 	"testing"
-	"time"
 
-	"github.com/mattn/go-colorable"
-	"github.com/mattn/go-isatty"
-	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/stretchr/testify/assert"
 
-	"scroll-tech/common/docker"
+	"scroll-tech/common/database"
+	"scroll-tech/common/testcontainers"
 	"scroll-tech/common/version"
 )
 
-func TestGormLogger(t *testing.T) {
-	output := io.Writer(os.Stderr)
-	usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
-	if usecolor {
-		output = colorable.NewColorableStderr()
-	}
-	ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
-	glogger := log.NewGlogHandler(ostream)
-	// Set log level
-	glogger.Verbosity(log.LvlTrace)
-	log.Root().SetHandler(glogger)
-
-	var gl gormLogger
-	gl.gethLogger = log.Root()
-
-	gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
-	gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
-	gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
-	gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
-}
-
 func TestDB(t *testing.T) {
 	version.Version = "v4.1.98-aaa-bbb-ccc"
-	base := docker.NewDockerApp()
-	base.RunDBImage(t)
-
-	dbCfg := &Config{
-		DSN:        base.DBConfig.DSN,
-		DriverName: base.DBConfig.DriverName,
-		MaxOpenNum: base.DBConfig.MaxOpenNum,
-		MaxIdleNum: base.DBConfig.MaxIdleNum,
-	}
+	testApps := testcontainers.NewTestcontainerApps()
+	assert.NoError(t, testApps.StartPostgresContainer())
 
 	var err error
-	db, err := InitDB(dbCfg)
+	db, err := testApps.GetGormDBClient()
 	assert.NoError(t, err)
 
-	sqlDB, err := Ping(db)
+	sqlDB, err := database.Ping(db)
 	assert.NoError(t, err)
 	assert.NotNil(t, sqlDB)
 
-	assert.NoError(t, CloseDB(db))
+	assert.NoError(t, database.CloseDB(db))
 }
common/database/logger_test.go (new file, 35 lines)

@@ -0,0 +1,35 @@
+package database
+
+import (
+	"context"
+	"errors"
+	"io"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+	"github.com/scroll-tech/go-ethereum/log"
+)
+
+func TestGormLogger(t *testing.T) {
+	output := io.Writer(os.Stderr)
+	usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
+	if usecolor {
+		output = colorable.NewColorableStderr()
+	}
+	ostream := log.StreamHandler(output, log.TerminalFormat(usecolor))
+	glogger := log.NewGlogHandler(ostream)
+	// Set log level
+	glogger.Verbosity(log.LvlTrace)
+	log.Root().SetHandler(glogger)
+
+	var gl gormLogger
+	gl.gethLogger = log.Root()
+
+	gl.Error(context.Background(), "test %s error:%v", "testError", errors.New("test error"))
+	gl.Warn(context.Background(), "test %s warn:%v", "testWarn", errors.New("test warn"))
+	gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
+	gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
+}
@@ -19,7 +19,7 @@ CAPELLA_FORK_VERSION: 0x20000092
 MAX_WITHDRAWALS_PER_PAYLOAD: 16
 
 # Deneb
-DENEB_FORK_EPOCH: 1
+DENEB_FORK_EPOCH: 0
 DENEB_FORK_VERSION: 0x20000093
 
 # Time parameters
@@ -19,7 +19,7 @@ services:
     command:
       - testnet
      - generate-genesis
-      - --fork=capella
+      - --fork=deneb
       - --num-validators=64
       - --genesis-time-delay=3
       - --output-ssz=/data/consensus/genesis.ssz
@@ -1,196 +0,0 @@
-package docker
-
-import (
-	"crypto/rand"
-	"database/sql"
-	"encoding/json"
-	"fmt"
-	"math/big"
-	"os"
-	"testing"
-	"time"
-
-	"github.com/jmoiron/sqlx"
-	"github.com/scroll-tech/go-ethereum/ethclient"
-	"github.com/stretchr/testify/assert"
-
-	"scroll-tech/database"
-
-	"scroll-tech/common/utils"
-)
-
-var (
-	l1StartPort = 10000
-	l2StartPort = 20000
-	dbStartPort = 30000
-)
-
-// AppAPI app interface.
-type AppAPI interface {
-	IsRunning() bool
-	WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
-	RunApp(waitResult func() bool)
-	WaitExit()
-	ExpectWithTimeout(t *testing.T, parallel bool, timeout time.Duration, keyword string)
-}
-
-// App is collection struct of runtime docker images
-type App struct {
-	L1gethImg GethImgInstance
-	L2gethImg GethImgInstance
-	DBImg     ImgInstance
-
-	dbClient     *sql.DB
-	DBConfig     *database.DBConfig
-	DBConfigFile string
-
-	// common time stamp.
-	Timestamp int
-}
-
-// NewDockerApp returns new instance of dockerApp struct
-func NewDockerApp() *App {
-	timestamp := time.Now().Nanosecond()
-	app := &App{
-		Timestamp:    timestamp,
-		L1gethImg:    newTestL1Docker(),
-		L2gethImg:    newTestL2Docker(),
-		DBImg:        newTestDBDocker("postgres"),
-		DBConfigFile: fmt.Sprintf("/tmp/%d_db-config.json", timestamp),
-	}
-	if err := app.mockDBConfig(); err != nil {
-		panic(err)
-	}
-	return app
-}
-
-// RunImages runs all images togather
-func (b *App) RunImages(t *testing.T) {
-	b.RunDBImage(t)
-	b.RunL1Geth(t)
-	b.RunL2Geth(t)
-}
-
-// RunDBImage starts postgres docker container.
-func (b *App) RunDBImage(t *testing.T) {
-	if b.DBImg.IsRunning() {
-		return
-	}
-	assert.NoError(t, b.DBImg.Start())
-
-	// try 5 times until the db is ready.
-	ok := utils.TryTimes(10, func() bool {
-		db, err := sqlx.Open("postgres", b.DBImg.Endpoint())
-		return err == nil && db != nil && db.Ping() == nil
-	})
-	assert.True(t, ok)
-}
-
-// Free clear all running images, double check and recycle docker container.
-func (b *App) Free() {
-	if b.L1gethImg.IsRunning() {
-		_ = b.L1gethImg.Stop()
-	}
-	if b.L2gethImg.IsRunning() {
-		_ = b.L2gethImg.Stop()
-	}
-	if b.DBImg.IsRunning() {
-		_ = b.DBImg.Stop()
-		_ = os.Remove(b.DBConfigFile)
-		if !utils.IsNil(b.dbClient) {
-			_ = b.dbClient.Close()
-			b.dbClient = nil
-		}
-	}
-}
-
-// RunL1Geth starts l1geth docker container.
-func (b *App) RunL1Geth(t *testing.T) {
-	if b.L1gethImg.IsRunning() {
-		return
-	}
-	assert.NoError(t, b.L1gethImg.Start())
-}
-
-// L1Client returns a ethclient by dialing running l1geth
-func (b *App) L1Client() (*ethclient.Client, error) {
-	if utils.IsNil(b.L1gethImg) {
-		return nil, fmt.Errorf("l1 geth is not running")
-	}
-	client, err := ethclient.Dial(b.L1gethImg.Endpoint())
-	if err != nil {
-		return nil, err
-	}
-	return client, nil
-}
-
-// RunL2Geth starts l2geth docker container.
-func (b *App) RunL2Geth(t *testing.T) {
-	if b.L2gethImg.IsRunning() {
-		return
-	}
-	assert.NoError(t, b.L2gethImg.Start())
-}
-
-// L2Client returns a ethclient by dialing running l2geth
-func (b *App) L2Client() (*ethclient.Client, error) {
-	if utils.IsNil(b.L2gethImg) {
-		return nil, fmt.Errorf("l2 geth is not running")
-	}
-	client, err := ethclient.Dial(b.L2gethImg.Endpoint())
-	if err != nil {
-		return nil, err
-	}
-	return client, nil
-}
-
-// DBClient create and return *sql.DB instance.
-func (b *App) DBClient(t *testing.T) *sql.DB {
-	if !utils.IsNil(b.dbClient) {
-		return b.dbClient
-	}
-	var (
-		cfg = b.DBConfig
-		err error
-	)
-	b.dbClient, err = sql.Open(cfg.DriverName, cfg.DSN)
-	assert.NoError(t, err)
-	b.dbClient.SetMaxOpenConns(cfg.MaxOpenNum)
-	b.dbClient.SetMaxIdleConns(cfg.MaxIdleNum)
-	assert.NoError(t, b.dbClient.Ping())
-	return b.dbClient
-}
-
-func (b *App) mockDBConfig() error {
-	b.DBConfig = &database.DBConfig{
-		DSN:        "",
-		DriverName: "postgres",
-		MaxOpenNum: 200,
-		MaxIdleNum: 20,
-	}
-
-	if b.DBImg != nil {
-		b.DBConfig.DSN = b.DBImg.Endpoint()
-	}
-	data, err := json.Marshal(b.DBConfig)
-	if err != nil {
-		return err
-	}
-
-	return os.WriteFile(b.DBConfigFile, data, 0644) //nolint:gosec
-}
-
-func newTestL1Docker() GethImgInstance {
-	id, _ := rand.Int(rand.Reader, big.NewInt(2000))
-	return NewImgGeth("scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
-}
-
-func newTestL2Docker() GethImgInstance {
-	id, _ := rand.Int(rand.Reader, big.NewInt(2000))
-	return NewImgGeth("scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
-}
-
-func newTestDBDocker(driverName string) ImgInstance {
-	id, _ := rand.Int(rand.Reader, big.NewInt(2000))
-	return NewImgDB(driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
-}
@@ -1,131 +0,0 @@
-package docker
-
-import (
-	"context"
-	"fmt"
-	"strings"
-	"time"
-
-	"github.com/docker/docker/api/types/container"
-
-	"scroll-tech/common/cmd"
-	"scroll-tech/common/utils"
-)
-
-// ImgDB the postgres image manager.
-type ImgDB struct {
-	image string
-	name  string
-	id    string
-
-	dbName   string
-	port     int
-	password string
-
-	running bool
-	cmd     *cmd.Cmd
-}
-
-// NewImgDB return postgres db img instance.
-func NewImgDB(image, password, dbName string, port int) ImgInstance {
-	img := &ImgDB{
-		image:    image,
-		name:     fmt.Sprintf("%s-%s_%d", image, dbName, port),
-		password: password,
-		dbName:   dbName,
-		port:     port,
-	}
-	img.cmd = cmd.NewCmd("docker", img.prepare()...)
-	return img
-}
-
-// Start postgres db container.
-func (i *ImgDB) Start() error {
-	id := GetContainerID(i.name)
-	if id != "" {
-		return fmt.Errorf("container already exist, name: %s", i.name)
-	}
-	i.running = i.isOk()
-	if !i.running {
-		_ = i.Stop()
-		return fmt.Errorf("failed to start image: %s", i.image)
-	}
-	return nil
-}
-
-// Stop the container.
-func (i *ImgDB) Stop() error {
-	if !i.running {
-		return nil
-	}
-	i.running = false
-
-	ctx := context.Background()
-	// stop the running container.
-	if i.id == "" {
-		i.id = GetContainerID(i.name)
-	}
-
-	timeoutSec := 3
-	timeout := container.StopOptions{
-		Timeout: &timeoutSec,
-	}
-	if err := cli.ContainerStop(ctx, i.id, timeout); err != nil {
-		return err
-	}
-	// remove the stopped container.
-	return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
-}
-
-// Endpoint return the dsn.
-func (i *ImgDB) Endpoint() string {
-	return fmt.Sprintf("postgres://postgres:%s@localhost:%d/%s?sslmode=disable", i.password, i.port, i.dbName)
-}
-
-// IsRunning returns docker container's running status.
-func (i *ImgDB) IsRunning() bool {
-	return i.running
-}
-
-func (i *ImgDB) prepare() []string {
-	cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
-	envs := []string{
-		"-e", "POSTGRES_PASSWORD=" + i.password,
-		"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
-	}
-
-	cmd = append(cmd, envs...)
-	return append(cmd, i.image)
-}
-
-func (i *ImgDB) isOk() bool {
-	keyword := "database system is ready to accept connections"
-	okCh := make(chan struct{}, 1)
-	i.cmd.RegistFunc(keyword, func(buf string) {
-		if strings.Contains(buf, keyword) {
-			select {
-			case okCh <- struct{}{}:
-			default:
-				return
-			}
-		}
-	})
-	defer i.cmd.UnRegistFunc(keyword)
-	// Start cmd in parallel.
-	i.cmd.RunCmd(true)
-
-	select {
-	case <-okCh:
-		utils.TryTimes(20, func() bool {
-			i.id = GetContainerID(i.name)
-			return i.id != ""
-		})
-	case err := <-i.cmd.ErrChan:
-		if err != nil {
-			fmt.Printf("failed to start %s, err: %v\n", i.name, err)
-		}
-	case <-time.After(time.Second * 20):
-		return false
-	}
-	return i.id != ""
-}
@@ -1,174 +0,0 @@
-package docker
-
-import (
-	"context"
-	"fmt"
-	"math/big"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/docker/docker/api/types/container"
-	"github.com/scroll-tech/go-ethereum/ethclient"
-
-	"scroll-tech/common/cmd"
-	"scroll-tech/common/utils"
-)
-
-// ImgGeth the geth image manager include l1geth and l2geth.
-type ImgGeth struct {
-	image string
-	name  string
-	id    string
-
-	volume   string
-	ipcPath  string
-	httpPort int
-	wsPort   int
-	chainID  *big.Int
-
-	running bool
-	cmd     *cmd.Cmd
-}
-
-// NewImgGeth return geth img instance.
-func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
-	img := &ImgGeth{
-		image:    image,
-		name:     fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
-		volume:   volume,
-		ipcPath:  ipc,
-		httpPort: hPort,
-		wsPort:   wPort,
-	}
-	img.cmd = cmd.NewCmd("docker", img.params()...)
-	return img
-}
-
-// Start run image and check if it is running healthily.
-func (i *ImgGeth) Start() error {
-	id := GetContainerID(i.name)
-	if id != "" {
-		return fmt.Errorf("container already exist, name: %s", i.name)
-	}
-	i.running = i.isOk()
-	if !i.running {
-		_ = i.Stop()
-		return fmt.Errorf("failed to start image: %s", i.image)
-	}
-
-	// try 10 times to get chainID until is ok.
-	utils.TryTimes(10, func() bool {
-		client, err := ethclient.Dial(i.Endpoint())
-		if err == nil && client != nil {
-			i.chainID, err = client.ChainID(context.Background())
-			return err == nil && i.chainID != nil
-		}
-		return false
-	})
-
-	return nil
-}
-
-// IsRunning returns docker container's running status.
-func (i *ImgGeth) IsRunning() bool {
-	return i.running
-}
-
-// Endpoint return the connection endpoint.
-func (i *ImgGeth) Endpoint() string {
-	switch true {
-	case i.httpPort != 0:
-		return fmt.Sprintf("http://127.0.0.1:%d", i.httpPort)
-	case i.wsPort != 0:
-		return fmt.Sprintf("ws://127.0.0.1:%d", i.wsPort)
-	default:
-		return i.ipcPath
-	}
-}
-
-// ChainID return chainID.
-func (i *ImgGeth) ChainID() *big.Int {
-	return i.chainID
-}
-
-func (i *ImgGeth) isOk() bool {
-	keyword := "WebSocket enabled"
-	okCh := make(chan struct{}, 1)
-	i.cmd.RegistFunc(keyword, func(buf string) {
-		if strings.Contains(buf, keyword) {
-			select {
-			case okCh <- struct{}{}:
-			default:
-				return
-			}
-		}
-	})
-	defer i.cmd.UnRegistFunc(keyword)
-	// Start cmd in parallel.
-	i.cmd.RunCmd(true)
-
-	select {
-	case <-okCh:
-		utils.TryTimes(20, func() bool {
-			i.id = GetContainerID(i.name)
-			return i.id != ""
-		})
-	case err := <-i.cmd.ErrChan:
-		if err != nil {
-			fmt.Printf("failed to start %s, err: %v\n", i.name, err)
-		}
-	case <-time.After(time.Second * 10):
-		return false
-	}
-	return i.id != ""
-}
-
-// Stop the docker container.
-func (i *ImgGeth) Stop() error {
-	if !i.running {
-		return nil
-	}
-	i.running = false
-
-	ctx := context.Background()
-	// check if container is running, stop the running container.
-	id := GetContainerID(i.name)
-	if id != "" {
-		timeoutSec := 3
-		timeout := container.StopOptions{
-			Timeout: &timeoutSec,
-		}
-		if err := cli.ContainerStop(ctx, id, timeout); err != nil {
-			return err
-		}
-		i.id = id
-	}
-	// remove the stopped container.
-	return cli.ContainerRemove(ctx, i.id, container.RemoveOptions{})
-}
-
-func (i *ImgGeth) params() []string {
-	cmds := []string{"run", "--rm", "--name", i.name}
-	var ports []string
-	if i.httpPort != 0 {
-		ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)
-	}
-	if i.wsPort != 0 {
-		ports = append(ports, []string{"-p", strconv.Itoa(i.wsPort) + ":8546"}...)
-	}
-
-	var envs []string
-	if i.ipcPath != "" {
-		envs = append(envs, []string{"-e", fmt.Sprintf("IPC_PATH=%s", i.ipcPath)}...)
-	}
-
-	if i.volume != "" {
-		cmds = append(cmds, []string{"-v", fmt.Sprintf("%s:%s", i.volume, i.volume)}...)
-	}
-
-	cmds = append(cmds, ports...)
-	cmds = append(cmds, envs...)
-
-	return append(cmds, i.image)
-}
@@ -1,54 +0,0 @@
-package docker_test
-
-import (
-	"context"
-	"testing"
-
-	"github.com/jmoiron/sqlx"
-	_ "github.com/lib/pq" //nolint:golint
-	"github.com/stretchr/testify/assert"
-
-	"scroll-tech/common/docker"
-)
-
-var (
-	base *docker.App
-)
-
-func TestMain(m *testing.M) {
-	base = docker.NewDockerApp()
-
-	m.Run()
-
-	base.Free()
-}
-
-func TestDB(t *testing.T) {
-	base.RunDBImage(t)
-
-	db, err := sqlx.Open("postgres", base.DBImg.Endpoint())
-	assert.NoError(t, err)
-	assert.NoError(t, db.Ping())
-}
-
-func TestL1Geth(t *testing.T) {
-	base.RunL1Geth(t)
-
-	client, err := base.L1Client()
-	assert.NoError(t, err)
-
-	chainID, err := client.ChainID(context.Background())
-	assert.NoError(t, err)
-	t.Logf("chainId: %s", chainID.String())
-}
-
-func TestL2Geth(t *testing.T) {
-	base.RunL2Geth(t)
-
-	client, err := base.L2Client()
-	assert.NoError(t, err)
-
-	chainID, err := client.ChainID(context.Background())
-	assert.NoError(t, err)
-	t.Logf("chainId: %s", chainID.String())
-}
@@ -15,7 +15,7 @@
     "archimedesBlock": 0,
     "shanghaiBlock": 0,
     "clique": {
-      "period": 3,
+      "period": 1,
       "epoch": 30000
     },
     "scroll": {
@@ -33,7 +33,8 @@ func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]
 		{name: "arrowGlacier", block: config.ArrowGlacierBlock},
 		{name: "archimedes", block: config.ArchimedesBlock},
 		{name: "shanghai", block: config.ShanghaiBlock},
-		{name: "banach", block: config.BanachBlock},
+		{name: "bernoulli", block: config.BernoulliBlock},
+		{name: "curie", block: config.CurieBlock},
 	} {
 		if fork.block == nil {
 			continue
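The hunk above swaps the pre-release "banach" fork name for the shipped "bernoulli" and "curie" forks. As the test below pins down, CollectSortedForkHeights skips unscheduled (nil) fork blocks, deduplicates activation heights, and returns them sorted together with lookup maps. A simplified sketch of that skip/dedupe/sort pattern (reconstructed for illustration only; the real function also builds a fork-name-to-height map with its own collision rules, visible in the test):

```go
package main

import (
	"fmt"
	"math/big"
	"sort"
)

// collectSortedForkHeights sketches the pattern: walk the configured fork
// blocks, skip forks that are not scheduled (nil), and return the
// deduplicated ascending heights plus a set for O(1) membership checks.
func collectSortedForkHeights(forkBlocks []*big.Int) ([]uint64, map[uint64]bool) {
	heightSet := make(map[uint64]bool)
	for _, block := range forkBlocks {
		if block == nil { // fork not enabled on this chain
			continue
		}
		heightSet[block.Uint64()] = true
	}
	heights := make([]uint64, 0, len(heightSet))
	for h := range heightSet {
		heights = append(heights, h)
	}
	sort.Slice(heights, func(i, j int) bool { return heights[i] < heights[j] })
	return heights, heightSet
}

func main() {
	heights, set := collectSortedForkHeights([]*big.Int{
		big.NewInt(4), big.NewInt(3), nil, big.NewInt(0), big.NewInt(3),
	})
	fmt.Println(heights, set[3]) // [0 3 4] true
}
```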
@@ -11,10 +11,10 @@ import (
 
 func TestCollectSortedForkBlocks(t *testing.T) {
 	l, m, n := CollectSortedForkHeights(&params.ChainConfig{
 		EIP155Block:         big.NewInt(4),
 		EIP158Block:         big.NewInt(3),
 		ByzantiumBlock:      big.NewInt(3),
 		ConstantinopleBlock: big.NewInt(0),
 		ArchimedesBlock:     big.NewInt(0),
 		ShanghaiBlock:       big.NewInt(3),
+		BernoulliBlock:      big.NewInt(3),
+		CurieBlock:          big.NewInt(4),
 	})
 	require.Equal(t, l, []uint64{
 		0,
@@ -27,9 +27,9 @@ func TestCollectSortedForkBlocks(t *testing.T) {
 		0: true,
 	}, m)
 	require.Equal(t, map[string]uint64{
 		"eip155":         4,
 		"byzantium":      3,
 		"constantinople": 0,
 		"archimedes":     0,
+		"bernoulli":      3,
+		"curie":          4,
 	}, n)
 }
@@ -9,17 +9,16 @@ require (
 	github.com/docker/docker v25.0.3+incompatible
 	github.com/gin-contrib/pprof v1.4.0
 	github.com/gin-gonic/gin v1.9.1
-	github.com/jmoiron/sqlx v1.3.5
-	github.com/lib/pq v1.10.9
 	github.com/mattn/go-colorable v0.1.13
 	github.com/mattn/go-isatty v0.0.20
 	github.com/modern-go/reflect2 v1.0.2
 	github.com/orcaman/concurrent-map v1.0.0
 	github.com/prometheus/client_golang v1.16.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
 	github.com/stretchr/testify v1.9.0
-	github.com/testcontainers/testcontainers-go v0.29.1
-	github.com/testcontainers/testcontainers-go/modules/compose v0.29.1
+	github.com/testcontainers/testcontainers-go v0.28.0
+	github.com/testcontainers/testcontainers-go/modules/compose v0.28.0
+	github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0
 	github.com/urfave/cli/v2 v2.25.7
 	gorm.io/driver/postgres v1.5.0
 	gorm.io/gorm v1.25.5
@@ -127,7 +126,7 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
-	github.com/jackc/pgx/v5 v5.5.0 // indirect
+	github.com/jackc/pgx/v5 v5.5.4 // indirect
 	github.com/jackc/puddle/v2 v2.2.1 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
@@ -144,7 +143,6 @@ require (
 	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
-	github.com/mattn/go-sqlite3 v1.14.16 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
@@ -268,7 +268,6 @@ github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXS
 github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24=
 github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
 github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
 github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@@ -382,8 +381,8 @@ github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5ey
 github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
 github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
 github.com/jackc/pgx/v5 v5.3.0/go.mod h1:t3JDKnCBlYIc0ewLF0Q7B8MXmoIaBOZj/ic7iHozM/8=
-github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
-github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
+github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
+github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
 github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
 github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
 github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
@@ -400,8 +399,6 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
 github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
-github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
 github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
 github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -443,7 +440,6 @@ github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ic
 github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
 github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
@@ -469,9 +465,6 @@ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
 github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
 github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
-github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
-github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -614,8 +607,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935 h1:bHBt6sillaT4o/9RjxkVX8pWwvEmu37uWBw4XbCjfzY=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
 github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
 github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
@@ -678,10 +671,12 @@ github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZ
 github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
-github.com/testcontainers/testcontainers-go v0.29.1 h1:z8kxdFlovA2y97RWx98v/TQ+tR+SXZm6p35M+xB92zk=
-github.com/testcontainers/testcontainers-go v0.29.1/go.mod h1:SnKnKQav8UcgtKqjp/AD8bE1MqZm+3TDb/B8crE3XnI=
-github.com/testcontainers/testcontainers-go/modules/compose v0.29.1 h1:47ipPM+s+ltCDOP3Sa1j95AkNb+z+WGiHLDbLU8ixuc=
-github.com/testcontainers/testcontainers-go/modules/compose v0.29.1/go.mod h1:Sqh+Ef2ESdbJQjTJl57UOkEHkOc7gXvQLg1b5xh6f1Y=
+github.com/testcontainers/testcontainers-go v0.28.0 h1:1HLm9qm+J5VikzFDYhOd+Zw12NtOl+8drH2E8nTY1r8=
+github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
+github.com/testcontainers/testcontainers-go/modules/compose v0.28.0 h1:QOCeTYZIYixg796Ik60MOaeMgpAKPbQd5pJOdTrftyg=
+github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
+github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0 h1:ff0s4JdYIdNAVSi/SrpN2Pdt1f+IjIw3AKjbHau8Un4=
+github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
 github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
 github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
 github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA=
common/libzkp/impl/Cargo.lock (generated, 1054 changes): file diff suppressed because it is too large.
@@ -8,26 +8,30 @@ edition = "2021"
 crate-type = ["cdylib"]
 
 [patch.crates-io]
 gobuild = { git = "https://github.com/scroll-tech/gobuild.git" }
+halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
 ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
 ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
 ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
 #ethers-etherscan = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
 #ethers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
 [patch."https://github.com/privacy-scaling-explorations/halo2.git"]
-halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
+halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
 [patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
-poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "scroll-dev-0220" }
-[patch."https://github.com/privacy-scaling-explorations/halo2wrong.git"]
-halo2wrong = { git = "https://github.com/scroll-tech/halo2wrong.git", branch = "halo2-ecc-snark-verifier-0323" }
-maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-ecc-snark-verifier-0323" }
-[patch."https://github.com/privacy-scaling-explorations/halo2curves.git"]
-halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
+poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
+[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
+bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }
 
 [dependencies]
-halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
-prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.9", default-features = false, features = ["parallel_syn", "scroll", "shanghai", "strict-ccc"] }
+halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
+snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
+prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
 
 base64 = "0.13.0"
 env_logger = "0.9.0"
 libc = "0.2"
 log = "0.4"
-once_cell = "1.8.0"
+once_cell = "1.19"
 serde = "1.0"
 serde_derive = "1.0"
 serde_json = "1.0.66"
@@ -1 +1 @@
-nightly-2022-12-10
+nightly-2023-12-03
@@ -12,6 +12,7 @@ use prover::{
     utils::{chunk_trace_to_witness_block, init_env_and_log},
     BatchProof, BlockTrace, ChunkHash, ChunkProof,
 };
+use snark_verifier_sdk::verify_evm_calldata;
 use std::{cell::OnceCell, env, ptr::null};
 
 static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -119,7 +120,7 @@ pub unsafe extern "C" fn gen_batch_proof(
 
     let chunk_hashes_proofs = chunk_hashes
         .into_iter()
-        .zip(chunk_proofs.into_iter())
+        .zip(chunk_proofs)
         .collect();
 
     let proof = PROVER
@@ -148,11 +149,33 @@ pub unsafe extern "C" fn gen_batch_proof(
 
 /// # Safety
 #[no_mangle]
-pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char {
+pub unsafe extern "C" fn verify_batch_proof(
+    proof: *const c_char,
+    fork_name: *const c_char,
+) -> c_char {
     let proof = c_char_to_vec(proof);
     let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
-    let verified = panic_catch(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
+    let fork_name_str = c_char_to_str(fork_name);
+    let fork_id = match fork_name_str {
+        "" => 0,
+        "shanghai" => 0,
+        "bernoulli" => 1,
+        _ => {
+            log::warn!("unexpected fork_name {fork_name_str}, treated as bernoulli");
+            1
+        }
+    };
+    let verified = panic_catch(|| {
+        if fork_id == 0 {
+            // before upgrade#2(EIP4844)
+            verify_evm_calldata(
+                include_bytes!("evm_verifier_fork_1.bin").to_vec(),
+                proof.calldata(),
+            )
+        } else {
+            VERIFIER.get().unwrap().verify_agg_evm_proof(proof)
+        }
+    });
     verified.unwrap_or(false) as c_char
 }
common/libzkp/impl/src/evm_verifier_fork_1.bin (new binary file, not shown)
@@ -1,5 +1,3 @@
-#![feature(once_cell)]
-
 mod batch;
 mod chunk;
 mod types;
@@ -3,7 +3,7 @@ void init_batch_verifier(char* params_dir, char* assets_dir);
 char* get_batch_vk();
 char* check_chunk_proofs(char* chunk_proofs);
 char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
-char verify_batch_proof(char* proof);
+char verify_batch_proof(char* proof, char* fork_name);
 
 void init_chunk_prover(char* params_dir, char* assets_dir);
 void init_chunk_verifier(char* params_dir, char* assets_dir);
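Every caller of the C API therefore passes the fork name alongside the proof. On the Go side this header is consumed through cgo; a hypothetical wrapper in that style (the package, wrapper name, and build setup here are illustrative, not the repository's actual binding):

```go
package libzkp

/*
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"

import "unsafe"

// VerifyBatchProof sketches a call through the updated signature: the proof
// JSON and the hard-fork name ("shanghai", "bernoulli", ...) cross the FFI
// boundary as C strings, and a non-zero return char means "verified".
func VerifyBatchProof(proofJSON, forkName string) bool {
	cProof := C.CString(proofJSON)
	defer C.free(unsafe.Pointer(cProof))
	cFork := C.CString(forkName)
	defer C.free(unsafe.Pointer(cFork))
	return C.verify_batch_proof(cProof, cFork) != 0
}
```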
common/testcontainers/testcontainers.go (new file, 200 lines)

@@ -0,0 +1,200 @@
+package testcontainers
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/scroll-tech/go-ethereum/ethclient"
+	"github.com/testcontainers/testcontainers-go"
+	"github.com/testcontainers/testcontainers-go/modules/postgres"
+	"github.com/testcontainers/testcontainers-go/wait"
+	"gorm.io/gorm"
+
+	"scroll-tech/common/database"
+)
+
+// TestcontainerApps testcontainers struct
+type TestcontainerApps struct {
+	postgresContainer *postgres.PostgresContainer
+	l1GethContainer   *testcontainers.DockerContainer
+	l2GethContainer   *testcontainers.DockerContainer
+
+	// common time stamp in nanoseconds.
+	Timestamp int
+}
+
+// NewTestcontainerApps returns new instance of TestcontainerApps struct
+func NewTestcontainerApps() *TestcontainerApps {
+	timestamp := time.Now().Nanosecond()
+	return &TestcontainerApps{
+		Timestamp: timestamp,
+	}
+}
+
+// StartPostgresContainer starts a postgres container
+func (t *TestcontainerApps) StartPostgresContainer() error {
+	if t.postgresContainer != nil && t.postgresContainer.IsRunning() {
+		return nil
+	}
+	postgresContainer, err := postgres.RunContainer(context.Background(),
+		testcontainers.WithImage("postgres"),
+		postgres.WithDatabase("test_db"),
+		postgres.WithPassword("123456"),
+		testcontainers.WithWaitStrategy(
+			wait.ForLog("database system is ready to accept connections").WithOccurrence(2).WithStartupTimeout(5*time.Second)),
+	)
+	if err != nil {
+		log.Printf("failed to start postgres container: %s", err)
+		return err
+	}
+	t.postgresContainer = postgresContainer
+	return nil
+}
+
+// StartL1GethContainer starts a L1Geth container
+func (t *TestcontainerApps) StartL1GethContainer() error {
+	if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
+		return nil
+	}
+	req := testcontainers.ContainerRequest{
+		Image:        "scroll_l1geth",
+		ExposedPorts: []string{"8546/tcp", "8545/tcp"},
+		WaitingFor: wait.ForAll(
+			wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
+			wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
+		),
+		Cmd: []string{"--log.debug", "ANY"},
+	}
+	genericContainerReq := testcontainers.GenericContainerRequest{
+		ContainerRequest: req,
+		Started:          true,
+	}
+	container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
+	if err != nil {
+		log.Printf("failed to start scroll_l1geth container: %s", err)
+		return err
+	}
+	t.l1GethContainer, _ = container.(*testcontainers.DockerContainer)
+	return nil
+}
+
+// StartL2GethContainer starts a L2Geth container
+func (t *TestcontainerApps) StartL2GethContainer() error {
+	if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
+		return nil
+	}
+	req := testcontainers.ContainerRequest{
+		Image:        "scroll_l2geth",
+		ExposedPorts: []string{"8546/tcp", "8545/tcp"},
+		WaitingFor: wait.ForAll(
+			wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
+			wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
+		),
+	}
+	genericContainerReq := testcontainers.GenericContainerRequest{
+		ContainerRequest: req,
+		Started:          true,
+	}
+	container, err := testcontainers.GenericContainer(context.Background(), genericContainerReq)
+	if err != nil {
+		log.Printf("failed to start scroll_l2geth container: %s", err)
+		return err
+	}
+	t.l2GethContainer, _ = container.(*testcontainers.DockerContainer)
+	return nil
+}
+
+// GetDBEndPoint returns the endpoint of the running postgres container
+func (t *TestcontainerApps) GetDBEndPoint() (string, error) {
+	if t.postgresContainer == nil || !t.postgresContainer.IsRunning() {
+		return "", fmt.Errorf("postgres is not running")
+	}
+	return t.postgresContainer.ConnectionString(context.Background(), "sslmode=disable")
+}
+
+// GetL1GethEndPoint returns the endpoint of the running L1Geth container
+func (t *TestcontainerApps) GetL1GethEndPoint() (string, error) {
+	if t.l1GethContainer == nil || !t.l1GethContainer.IsRunning() {
+		return "", fmt.Errorf("l1 geth is not running")
+	}
+	endpoint, err := t.l1GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
+	if err != nil {
+		return "", err
+	}
+	return endpoint, nil
+}
+
+// GetL2GethEndPoint returns the endpoint of the running L2Geth container
+func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
+	if t.l2GethContainer == nil || !t.l2GethContainer.IsRunning() {
+		return "", fmt.Errorf("l2 geth is not running")
+	}
+	endpoint, err := t.l2GethContainer.PortEndpoint(context.Background(), "8546/tcp", "ws")
+	if err != nil {
+		return "", err
+	}
+	return endpoint, nil
+}
+
+// GetGormDBClient returns a gorm.DB by connecting to the running postgres container
+func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
+	endpoint, err := t.GetDBEndPoint()
+	if err != nil {
+		return nil, err
+	}
+	dbCfg := &database.Config{
+		DSN:        endpoint,
+		DriverName: "postgres",
+		MaxOpenNum: 200,
+		MaxIdleNum: 20,
+	}
+	return database.InitDB(dbCfg)
+}
+
+// GetL1GethClient returns a ethclient by dialing running L1Geth
+func (t *TestcontainerApps) GetL1GethClient() (*ethclient.Client, error) {
+	endpoint, err := t.GetL1GethEndPoint()
+	if err != nil {
+		return nil, err
+	}
+	client, err := ethclient.Dial(endpoint)
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
+// GetL2GethClient returns a ethclient by dialing running L2Geth
+func (t *TestcontainerApps) GetL2GethClient() (*ethclient.Client, error) {
+	endpoint, err := t.GetL2GethEndPoint()
+	if err != nil {
+		return nil, err
+	}
+	client, err := ethclient.Dial(endpoint)
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
+// Free stops all running containers
+func (t *TestcontainerApps) Free() {
+	ctx := context.Background()
+	if t.postgresContainer != nil && t.postgresContainer.IsRunning() {
+		if err := t.postgresContainer.Terminate(ctx); err != nil {
+			log.Printf("failed to stop postgres container: %s", err)
+		}
+	}
+	if t.l1GethContainer != nil && t.l1GethContainer.IsRunning() {
+		if err := t.l1GethContainer.Terminate(ctx); err != nil {
+			log.Printf("failed to stop scroll_l1geth container: %s", err)
+		}
+	}
+	if t.l2GethContainer != nil && t.l2GethContainer.IsRunning() {
+		if err := t.l2GethContainer.Terminate(ctx); err != nil {
+			log.Printf("failed to stop scroll_l2geth container: %s", err)
+		}
+	}
+}
common/testcontainers/testcontainers_test.go (new file, 59 lines)

@@ -0,0 +1,59 @@
+package testcontainers
+
+import (
+	"testing"
+
+	"github.com/scroll-tech/go-ethereum/ethclient"
+	"github.com/stretchr/testify/assert"
+	"gorm.io/gorm"
+)
+
+// TestNewTestcontainerApps tests NewTestcontainerApps
+func TestNewTestcontainerApps(t *testing.T) {
+	var (
+		err          error
+		endpoint     string
+		gormDBclient *gorm.DB
+		ethclient    *ethclient.Client
+	)
+
+	// test start testcontainers
+	testApps := NewTestcontainerApps()
+	assert.NoError(t, testApps.StartPostgresContainer())
+	endpoint, err = testApps.GetDBEndPoint()
+	assert.NoError(t, err)
+	assert.NotEmpty(t, endpoint)
+	gormDBclient, err = testApps.GetGormDBClient()
+	assert.NoError(t, err)
+	assert.NotNil(t, gormDBclient)
+
+	assert.NoError(t, testApps.StartL1GethContainer())
+	endpoint, err = testApps.GetL1GethEndPoint()
+	assert.NoError(t, err)
+	assert.NotEmpty(t, endpoint)
+	ethclient, err = testApps.GetL1GethClient()
+	assert.NoError(t, err)
+	assert.NotNil(t, ethclient)
+
+	assert.NoError(t, testApps.StartL2GethContainer())
+	endpoint, err = testApps.GetL2GethEndPoint()
+	assert.NoError(t, err)
+	assert.NotEmpty(t, endpoint)
+	ethclient, err = testApps.GetL2GethClient()
+	assert.NoError(t, err)
+	assert.NotNil(t, ethclient)
+
+	// test free testcontainers
+	testApps.Free()
+	endpoint, err = testApps.GetDBEndPoint()
+	assert.EqualError(t, err, "postgres is not running")
+	assert.Empty(t, endpoint)
+
+	endpoint, err = testApps.GetL1GethEndPoint()
+	assert.EqualError(t, err, "l1 geth is not running")
+	assert.Empty(t, endpoint)
+
+	endpoint, err = testApps.GetL2GethEndPoint()
+	assert.EqualError(t, err, "l2 geth is not running")
+	assert.Empty(t, endpoint)
+}
@@ -442,9 +442,9 @@ func EstimateBatchL1CommitGas(b *encoding.Batch) (uint64, error) {
 }
 
 // EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately.
-func EstimateBatchL1CommitCalldataSize(c *encoding.Batch) (uint64, error) {
+func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) (uint64, error) {
 	var totalL1CommitCalldataSize uint64
-	for _, chunk := range c.Chunks {
+	for _, chunk := range b.Chunks {
 		chunkL1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk)
 		if err != nil {
 			return 0, err
@@ -227,18 +227,11 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
|
||||
}
|
||||
|
||||
// blob payload
|
||||
blob, z, err := constructBlobPayload(batch.Chunks)
|
||||
blob, blobVersionedHash, z, err := constructBlobPayload(batch.Chunks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// blob versioned hash
|
||||
c, err := kzg4844.BlobToCommitment(*blob)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create blob commitment")
|
||||
}
|
||||
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
|
||||
|
||||
daBatch := DABatch{
|
||||
Version: CodecV1Version,
|
||||
BatchIndex: batch.Index,
|
||||
@@ -281,7 +274,7 @@ func computeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore u
|
||||
}
|
||||
|
||||
// constructBlobPayload constructs the 4844 blob payload.
|
||||
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) {
|
||||
func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
|
||||
// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
|
||||
metadataLength := 2 + MaxNumChunks*4
|
||||
|
||||
@@ -289,11 +282,8 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
|
||||
blobBytes := make([]byte, metadataLength)
|
||||
|
||||
// challenge digest preimage
|
||||
// 1 hash for metadata and 1 for each chunk
|
||||
challengePreimage := make([]byte, (1+MaxNumChunks)*32)
|
||||
|
||||
// the challenge point z
|
||||
var z kzg4844.Point
|
||||
// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
|
||||
challengePreimage := make([]byte, (1+MaxNumChunks+1)*32)
|
||||
|
||||
// the chunk data hash used for calculating the challenge preimage
|
||||
var chunkDataHash common.Hash
|
||||
@@ -312,7 +302,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
|
||||
// encode L2 txs into blob payload
|
||||
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, common.Hash{}, nil, err
|
||||
}
|
||||
blobBytes = append(blobBytes, rlpTxData...)
|
||||
}
|
||||
@@ -344,15 +334,30 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
|
||||
// convert raw data to BLSFieldElements
|
||||
blob, err := makeBlobCanonical(blobBytes)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, common.Hash{}, nil, err
|
||||
}
|
||||
|
||||
// compute z = challenge_digest % BLS_MODULUS
|
||||
challengeDigest := crypto.Keccak256Hash(challengePreimage[:])
|
||||
point := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
|
||||
copy(z[:], point.Bytes()[0:32])
|
||||
// compute blob versioned hash
|
||||
c, err := kzg4844.BlobToCommitment(*blob)
|
||||
if err != nil {
|
||||
return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
|
||||
}
|
||||
blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
|
||||
|
||||
return blob, &z, nil
|
||||
// challenge: append blob versioned hash
|
||||
copy(challengePreimage[(1+MaxNumChunks)*32:], blobVersionedHash[:])
|
||||
|
||||
// compute z = challenge_digest % BLS_MODULUS
|
||||
challengeDigest := crypto.Keccak256Hash(challengePreimage)
|
||||
pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
|
||||
pointBytes := pointBigInt.Bytes()
|
||||
|
||||
// the challenge point z
|
||||
var z kzg4844.Point
|
||||
start := 32 - len(pointBytes)
|
||||
copy(z[start:], pointBytes)
|
||||
|
||||
return blob, blobVersionedHash, &z, nil
|
||||
}
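A note on the rewritten ending of constructBlobPayload: big.Int.Bytes() drops leading zero bytes, so the old copy(z[:], point.Bytes()[0:32]) breaks whenever the reduced point is shorter than 32 bytes; the new code left-pads instead. A minimal standalone sketch of the challenge-point computation, assuming MaxNumChunks = 15 and a zero-filled preimage purely for illustration:

package main

import (
    "fmt"
    "math/big"

    "github.com/scroll-tech/go-ethereum/crypto"
)

// blsModulus mirrors the BLSModulus used above (the EIP-4844 scalar field order).
var blsModulus, _ = new(big.Int).SetString("52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)

func main() {
    // hypothetical preimage: 1 metadata hash + 15 chunk hashes + 1 blob versioned hash
    challengePreimage := make([]byte, (1+15+1)*32)

    // z = keccak256(preimage) mod BLS_MODULUS
    challengeDigest := crypto.Keccak256Hash(challengePreimage)
    point := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), blsModulus)

    // left-pad to 32 bytes: Bytes() omits leading zeros
    var z [32]byte
    pointBytes := point.Bytes()
    copy(z[32-len(pointBytes):], pointBytes)
    fmt.Printf("z = %x\n", z)
}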

// makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
@@ -449,8 +454,55 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
    return BlobDataProofArgs.Pack(values...)
}

// Blob returns the blob of the batch.
func (b *DABatch) Blob() *kzg4844.Blob {
    return b.blob
}

// DecodeFromCalldata attempts to decode a DABatch and an array of DAChunks from the provided calldata byte slice.
func DecodeFromCalldata(data []byte) (*DABatch, []*DAChunk, error) {
    // TODO: implement this function.
    return nil, nil, nil
}

// EstimateChunkL1CommitBlobSize estimates the size of the L1 commit blob for a single chunk.
func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
    metadataSize := uint64(2 + 4*MaxNumChunks) // over-estimate: adding metadata length
    chunkDataSize, err := chunkL1CommitBlobDataSize(c)
    if err != nil {
        return 0, err
    }
    paddedSize := ((metadataSize + chunkDataSize + 30) / 31) * 32
    return paddedSize, nil
}
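The padding formula above packs the payload into 31-byte groups, one per 32-byte BLS field element (the top byte stays zero so each element remains below the BLS modulus). A small sketch of the arithmetic, assuming MaxNumChunks = 15 as in the tests below:

package main

import "fmt"

// estimateBlobSize mirrors the estimate above: metadata plus payload,
// rounded up to whole 31-byte groups, each stored as a 32-byte element.
func estimateBlobSize(dataSize uint64) uint64 {
    metadataSize := uint64(2 + 4*15) // 62 bytes for num_chunks + per-chunk sizes
    return ((metadataSize + dataSize + 30) / 31) * 32
}

func main() {
    fmt.Println(estimateBlobSize(1000)) // ceil(1062/31) = 35 elements -> 1120 bytes
}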

// EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
    metadataSize := uint64(2 + 4*MaxNumChunks)
    var batchDataSize uint64
    for _, c := range b.Chunks {
        chunkDataSize, err := chunkL1CommitBlobDataSize(c)
        if err != nil {
            return 0, err
        }
        batchDataSize += chunkDataSize
    }
    paddedSize := ((metadataSize + batchDataSize + 30) / 31) * 32
    return paddedSize, nil
}

func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
    var dataSize uint64
    for _, block := range c.Blocks {
        for _, tx := range block.Transactions {
            if tx.Type != types.L1MessageTxType {
                rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
                if err != nil {
                    return 0, err
                }
                dataSize += uint64(len(rlpTxData))
            }
        }
    }
    return dataSize, nil
}

@@ -10,7 +10,9 @@ import (
    "scroll-tech/common/types/encoding"
    "scroll-tech/common/types/encoding/codecv0"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/crypto/kzg4844"
    "github.com/stretchr/testify/assert"
)

@@ -477,55 +479,125 @@ func TestCodecV1BatchChallenge(t *testing.T) {
    originalBatch := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
    batch, err := NewDABatch(originalBatch)
    assert.NoError(t, err)
    assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b36", hex.EncodeToString(batch.z[:]))
    assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef", hex.EncodeToString(batch.z[:]))

    trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
    chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
    originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
    batch, err = NewDABatch(originalBatch)
    assert.NoError(t, err)
    assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c99294", hex.EncodeToString(batch.z[:]))
    assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e58", hex.EncodeToString(batch.z[:]))

    trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
    chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
    originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
    batch, err = NewDABatch(originalBatch)
    assert.NoError(t, err)
    assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a", hex.EncodeToString(batch.z[:]))
    assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8", hex.EncodeToString(batch.z[:]))

    trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
    chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
    originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk5}}
    batch, err = NewDABatch(originalBatch)
    assert.NoError(t, err)
    assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
    assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))

    trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
    chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
    originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk6}}
    batch, err = NewDABatch(originalBatch)
    assert.NoError(t, err)
    assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
    assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))

    trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
    chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
    originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk7}}
    batch, err = NewDABatch(originalBatch)
    assert.NoError(t, err)
    assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
    assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))

    // 15 chunks
    originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
    batch, err = NewDABatch(originalBatch)
    assert.NoError(t, err)
    assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2", hex.EncodeToString(batch.z[:]))
    assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b", hex.EncodeToString(batch.z[:]))

    chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
    chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
    originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk8, chunk9}}
    batch, err = NewDABatch(originalBatch)
    assert.NoError(t, err)
    assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c43", hex.EncodeToString(batch.z[:]))
    assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab", hex.EncodeToString(batch.z[:]))
}

func repeat(element byte, count int) string {
    result := make([]byte, 0, count)
    for i := 0; i < count; i++ {
        result = append(result, element)
    }
    return "0x" + common.Bytes2Hex(result)
}

func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
    nRowsData := 126914

    for _, tc := range []struct {
        chunks [][]string
        expectedz string
        expectedy string
    }{
        // single empty chunk
        {chunks: [][]string{{}}, expectedz: "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", expectedy: "304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd08"},
        // single non-empty chunk
        {chunks: [][]string{{"0x010203"}}, expectedz: "1c1d4bd5153f877d799853080aba243f2c186dd6d6064eaefacfe715c92b6354", expectedy: "24e80ed99526b0d15ba46f7ec682f517576ddae68d5131e5d351f8bae06ea7d3"},
        // multiple empty chunks
        {chunks: [][]string{{}, {}}, expectedz: "152c9ccfcc2884f9891f7adce2de110cf9f85bfd0e21f0933ae0636390a84d41", expectedy: "5f6f532676e25b49e2eae77513fbeca173a300b434c0a5e24fa554b68e27d582"},
        // multiple non-empty chunks
        {chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "62100f5381179ea7db7aa8fdedb0f7fc7b82730b75432d50ab41f80aeebe45a3", expectedy: "5b1f6e7a54907ddc06871853cf1f5d53bf2de0df7b61d0df84bc2c3fb80320cd"},
        // empty chunk followed by non-empty chunk
        {chunks: [][]string{{}, {"0x010203"}}, expectedz: "2d94d241c4a2a8d8f02845ca40cfba344f3b42384af2045a75c82e725a184232", expectedy: "302416c177e9e7fe40c3bc4315066c117e27d246b0a33ef68cdda6dd333c485c"},
        // non-empty chunk followed by empty chunk
        {chunks: [][]string{{"0x070809"}, {}}, expectedz: "7227567e3b1dbacb48a32bb85e4e99f73e4bd5620ea8cd4f5ac00a364c86af9c", expectedy: "2eb3dfd28362f35f562f779e749a555d2f1f87ddc716e95f04133d25189a391c"},
        // max number of chunks all empty
        {chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "1128ac3e22ced6af85be4335e0d03a266946a7cade8047e7fc59d6c8be642321", expectedy: "2d9b16422ce17f328fd00c99349768f0cb0c8648115eb3bd9b7864617ba88059"},
        // max number of chunks all non-empty
        {chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1a4025a3d74e70b511007dd55a2e252478c48054c6383285e8a176f33d99853b", expectedy: "12071ac2571c11220432a27b8be549392892e9baf4c654748ca206def3843940"},
        // single chunk blob full
        {chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "72714cc4a0ca75cee2d543b1f958e3d3dd59ac7df0d9d5617d8117b65295a5f2", expectedy: "4ebb690362bcbc42321309c210c99f2ebdb53b3fcf7cf3b17b78f6cfd1203ed3"},
        // multiple chunks blob full
        {chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "70eb5b4db503e59413238eef451871c5d12f2bb96c8b96ceca012f4ca0114727", expectedy: "568d0aaf280ec83f9c81ed2d80ecbdf199bd72dafb8a350007d37ea82997e455"},
        // max number of chunks only last one non-empty not full blob
        {chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "03db68ae16ee88489d52db19e6111b25630c5f23ad7cd14530aacf0cd231d476", expectedy: "24527d0b0e93b3dec0060c7b128975a8088b3104d3a297dc807ab43862a77a1a"},
        // max number of chunks only last one non-empty full blob
        {chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "677670193f73db499cede572bcb55677f0d2f13d690f9a820bd00bf584c3c241", expectedy: "1d85677f172dbdf4ad3094a17deeb1df4d7d2b7f35ecea44aebffa757811a268"},
        // max number of chunks but last is empty
        {chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "22935042dfe7df771b02c1f5cababfe508869e8f6339dabe25a8a32e37728bb0", expectedy: "48ca66fb5a094401728c3a6a517ffbd72c4d4d9a8c907e2d2f1320812f4d856f"},
    } {
        chunks := []*encoding.Chunk{}

        for _, c := range tc.chunks {
            block := &encoding.Block{Transactions: []*types.TransactionData{}}

            for _, data := range c {
                tx := &types.TransactionData{Type: 0xff, Data: data}
                block.Transactions = append(block.Transactions, tx)
            }

            chunk := &encoding.Chunk{Blocks: []*encoding.Block{block}}
            chunks = append(chunks, chunk)
        }

        b, _, z, err := constructBlobPayload(chunks)
        assert.NoError(t, err)
        actualZ := hex.EncodeToString(z[:])
        assert.Equal(t, tc.expectedz, actualZ)

        _, y, err := kzg4844.ComputeProof(*b, *z)
        assert.NoError(t, err)
        actualY := hex.EncodeToString(y[:])
        assert.Equal(t, tc.expectedy, actualY)
    }
}

func TestCodecV1BatchBlobDataProof(t *testing.T) {
@@ -536,7 +608,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
    assert.NoError(t, err)
    verifyData, err := batch.BlobDataProof()
    assert.NoError(t, err)
    assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b363d27683f7aab53cf071e2c8c8f3abfe750d206c048489450d120679cdc823f7db44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607a30dad96431f70551dd950c1426131d73ccea6d050d38dea123aad90aa8c0b734c98e8e04bd8ea8f19b415f2d85156d8", hex.EncodeToString(verifyData))
    assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef423dc493f1dd7c9fbecdffa021ca4649b13e8d72231487034ec6b27e155ecfd7b44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607b38542ec811c92d86ff6f3319de06ee60c42655278ccf874f3615f450de730895276828b73db03c553b0bc7e5474a5e0", hex.EncodeToString(verifyData))

    trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
    chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
@@ -545,7 +617,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
    assert.NoError(t, err)
    verifyData, err = batch.BlobDataProof()
    assert.NoError(t, err)
    assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c992946dc7e51a42a31f429bc1f321dcf020b9a661225259522dba186fcfe5dc012191b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e86a0b3c76e33edb24eb07faeaa5d3f2b15a55df6ab99abf828b5803f5681dc634602eb7469ee0556563b2eccebf16ec822", hex.EncodeToString(verifyData))
    assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e5821975f318babe50be728f9b52754d5ce2caa2ba82ba35b5888af1c5f28d23206b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e868e0c6fe7bd39baa5ee6339cd334a18af7c680d24e825262499e83b31633b13a9ee89813fae8441630c82bc9dce3f1e07", hex.EncodeToString(verifyData))

    trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
    chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
@@ -554,7 +626,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
    assert.NoError(t, err)
    verifyData, err = batch.BlobDataProof()
    assert.NoError(t, err)
    assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a24164e6ea8b7946ce5e40d2baa4f6aa0d030076f6074295288133c00e75dafa2afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d676248f5ca4a9f0d9b7fa48f5f649dc84e928161fd99ad1bd9a9879b05d29c5f718bfb3b0a696a5f3ed50b5b8c6a9d530b3ee", hex.EncodeToString(verifyData))
    assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8088a01e54e3565d2e91ce6afbadf479330847d9106737875303ce17f17c48722afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d67624aee03a0f7cdb7807bc7e0b9fb20bc299af2a35e38cda816708b40f2f18db491e14a0f5d9cfe2f4c12e4ca1a219484f17", hex.EncodeToString(verifyData))

    trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
    chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -563,7 +635,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
    assert.NoError(t, err)
    verifyData, err = batch.BlobDataProof()
    assert.NoError(t, err)
    assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
    assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))

    trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
    chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
@@ -572,7 +644,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
    assert.NoError(t, err)
    verifyData, err = batch.BlobDataProof()
    assert.NoError(t, err)
    assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
    assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))

    trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
    chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
@@ -581,7 +653,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
    assert.NoError(t, err)
    verifyData, err = batch.BlobDataProof()
    assert.NoError(t, err)
    assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
    assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))

    // 15 chunks
    originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
@@ -589,7 +661,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
    assert.NoError(t, err)
    verifyData, err = batch.BlobDataProof()
    assert.NoError(t, err)
    assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2613ca15d051a539e3b239027f9bdbd03bd3c66c98afafb674e2a7441912cbe099743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc93291fbc65cfa558e4df12bcde442483d31072000c56f94fe012285bc5832eaee5fe1d47f1e8655539c4500f66207d8edc6", hex.EncodeToString(verifyData))
    assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b54d28f1479467d8b97fb99f5257d3e5d63a81cb2d60e3564fe6ec6066a311c119743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc932a0b603cc94be2007d4b3b17af06b4fb30caf0e574d5abcfc5654079e65154679afad75844396082a7200a4e82462aeed", hex.EncodeToString(verifyData))

    chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
    chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -598,7 +670,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
    assert.NoError(t, err)
    verifyData, err = batch.BlobDataProof()
    assert.NoError(t, err)
    assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c4330b3863672052b3d6c6552d121b0b13f97659f49bbfb6d7fed6e4b7076e4a43383bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fdae9cb71d402cfe8bc4d659f228c41f0b9d195c5074278a2346204cfaa336f5de2244a3d53e0effa2f49c81924720e84e", hex.EncodeToString(verifyData))
    assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab35b73ddb4a78fc4a8540f1d8259512c46e606a701e7ef7742e38cc4562ef53b983bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fda28e5610ca6b185d6ac30b53bd83d6366fccb1956daafa90ff6b504a966b119ebb45cb3f7085b7c1d622ee1ad27fcff9", hex.EncodeToString(verifyData))
}

func TestCodecV1BatchSkipBitmap(t *testing.T) {
@@ -687,6 +759,51 @@ func TestCodecV1BatchSkipBitmap(t *testing.T) {
    assert.Equal(t, 42, int(batch.TotalL1MessagePopped))
}

func TestCodecV1ChunkAndBatchBlobSizeEstimation(t *testing.T) {
    trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
    chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
    chunk2BlobSize, err := EstimateChunkL1CommitBlobSize(chunk2)
    assert.NoError(t, err)
    assert.Equal(t, uint64(320), chunk2BlobSize)
    batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
    batch2BlobSize, err := EstimateBatchL1CommitBlobSize(batch2)
    assert.NoError(t, err)
    assert.Equal(t, uint64(320), batch2BlobSize)

    trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
    chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
    chunk3BlobSize, err := EstimateChunkL1CommitBlobSize(chunk3)
    assert.NoError(t, err)
    assert.Equal(t, uint64(5952), chunk3BlobSize)
    batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
    batch3BlobSize, err := EstimateBatchL1CommitBlobSize(batch3)
    assert.NoError(t, err)
    assert.Equal(t, uint64(5952), batch3BlobSize)

    trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
    chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
    chunk4BlobSize, err := EstimateChunkL1CommitBlobSize(chunk4)
    assert.NoError(t, err)
    assert.Equal(t, uint64(128), chunk4BlobSize)
    batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
    batch4BlobSize, err := EstimateBatchL1CommitBlobSize(batch4)
    assert.NoError(t, err)
    assert.Equal(t, uint64(128), batch4BlobSize)

    chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
    chunk5BlobSize, err := EstimateChunkL1CommitBlobSize(chunk5)
    assert.NoError(t, err)
    assert.Equal(t, uint64(6176), chunk5BlobSize)
    chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
    chunk6BlobSize, err := EstimateChunkL1CommitBlobSize(chunk6)
    assert.NoError(t, err)
    assert.Equal(t, uint64(128), chunk6BlobSize)
    batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
    batch5BlobSize, err := EstimateBatchL1CommitBlobSize(batch5)
    assert.NoError(t, err)
    assert.Equal(t, uint64(6208), batch5BlobSize)
}

func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
    data, err := os.ReadFile(filename)
    assert.NoError(t, err)

@@ -6,8 +6,30 @@ import (
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/log"
)

// CodecVersion defines the version of encoder and decoder.
type CodecVersion int

const (
    // CodecV0 represents the version 0 of the encoder and decoder.
    CodecV0 CodecVersion = iota

    // CodecV1 represents the version 1 of the encoder and decoder.
    CodecV1

    // txTypeTest is a special transaction type used in unit tests.
    txTypeTest = 0xff
)

func init() {
    // make sure txTypeTest will not interfere with other transaction types
    if txTypeTest == types.LegacyTxType || txTypeTest == types.AccessListTxType || txTypeTest == types.DynamicFeeTxType || txTypeTest == types.BlobTxType || txTypeTest == types.L1MessageTxType {
        log.Crit("txTypeTest is overlapping with existing transaction types")
    }
}

// Block represents an L2 block.
type Block struct {
    Header *types.Header

@@ -123,6 +145,10 @@ func ConvertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
            S: txData.S.ToInt(),
        })

    case txTypeTest:
        // in the tests, we simply use `data` as the RLP-encoded transaction
        return data, nil

    case types.L1MessageTxType: // L1MessageTxType is not supported
    default:
        return nil, fmt.Errorf("unsupported tx type: %d", txData.Type)

@@ -209,8 +235,3 @@ func (b *Batch) WithdrawRoot() common.Hash {
    lastChunkBlockNum := len(b.Chunks[numChunks-1].Blocks)
    return b.Chunks[len(b.Chunks)-1].Blocks[lastChunkBlockNum-1].WithdrawRoot
}

// NumChunks gets the number of chunks of the batch.
func (b *Batch) NumChunks() uint64 {
    return uint64(len(b.Chunks))
}

@@ -75,7 +75,6 @@ func TestUtilFunctions(t *testing.T) {
    assert.Equal(t, uint64(240000), chunk3.L2GasUsed())

    // Test Batch methods
    assert.Equal(t, uint64(3), batch.NumChunks())
    assert.Equal(t, block6.Header.Root, batch.StateRoot())
    assert.Equal(t, block6.WithdrawRoot, batch.WithdrawRoot())
}
common/types/message/auth_msg.go (new file, 91 lines)
@@ -0,0 +1,91 @@
package message

import (
    "crypto/ecdsa"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/rlp"
)

// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
    // Message fields
    Identity *Identity `json:"message"`
    // Prover signature
    Signature string `json:"signature"`
}

// Identity contains all the fields to be signed by the prover.
type Identity struct {
    // ProverName the prover name
    ProverName string `json:"prover_name"`
    // ProverVersion the prover version
    ProverVersion string `json:"prover_version"`
    // Challenge unique challenge generated by manager
    Challenge string `json:"challenge"`
    // HardForkName the hard fork name
    HardForkName string `json:"hard_fork_name"`
}

// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
    // Hash identity content
    hash, err := a.Identity.Hash()
    if err != nil {
        return err
    }

    // Sign register message
    sig, err := crypto.Sign(hash, priv)
    if err != nil {
        return err
    }
    a.Signature = hexutil.Encode(sig)

    return nil
}

// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
    hash, err := a.Identity.Hash()
    if err != nil {
        return false, err
    }
    sig := common.FromHex(a.Signature)

    pk, err := crypto.SigToPub(hash, sig)
    if err != nil {
        return false, err
    }
    return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}

// PublicKey return public key from signature
func (a *AuthMsg) PublicKey() (string, error) {
    hash, err := a.Identity.Hash()
    if err != nil {
        return "", err
    }
    sig := common.FromHex(a.Signature)
    // recover public key
    pk, err := crypto.SigToPub(hash, sig)
    if err != nil {
        return "", err
    }
    return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
    byt, err := rlp.EncodeToBytes(i)
    if err != nil {
        return nil, err
    }
    hash := crypto.Keccak256Hash(byt)
    return hash[:], nil
}
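A minimal round-trip sketch for the new message type, assuming a freshly generated key and hypothetical identity values: SignWithKey signs keccak256(rlp(Identity)), and Verify and PublicKey recover the signer from that same digest.

package main

import (
    "fmt"

    "github.com/scroll-tech/go-ethereum/crypto"

    "scroll-tech/common/types/message"
)

func main() {
    // hypothetical prover key; in production this is the prover's persistent key
    priv, err := crypto.GenerateKey()
    if err != nil {
        panic(err)
    }

    authMsg := message.AuthMsg{
        Identity: &message.Identity{
            ProverName:    "test-prover",
            ProverVersion: "v4.3.93",
            Challenge:     "abc123",
            HardForkName:  "bernoulli",
        },
    }

    // sign over keccak256(rlp(Identity)), then recover and verify
    if err := authMsg.SignWithKey(priv); err != nil {
        panic(err)
    }
    ok, verifyErr := authMsg.Verify()
    fmt.Println(ok, verifyErr) // true <nil>

    pk, _ := authMsg.PublicKey()
    fmt.Println(pk) // compressed public key recovered from the signature
}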

common/types/message/legacy_auth_msg.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package message

import (
    "crypto/ecdsa"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/rlp"
)

// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type LegacyAuthMsg struct {
    // Message fields
    Identity *LegacyIdentity `json:"message"`
    // Prover signature
    Signature string `json:"signature"`
}

// LegacyIdentity contains all the fields to be signed by the prover.
type LegacyIdentity struct {
    // ProverName the prover name
    ProverName string `json:"prover_name"`
    // ProverVersion the prover version
    ProverVersion string `json:"prover_version"`
    // Challenge unique challenge generated by manager
    Challenge string `json:"challenge"`
}

// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
    // Hash identity content
    hash, err := a.Identity.Hash()
    if err != nil {
        return err
    }

    // Sign register message
    sig, err := crypto.Sign(hash, priv)
    if err != nil {
        return err
    }
    a.Signature = hexutil.Encode(sig)

    return nil
}

// Verify verifies the message of auth.
func (a *LegacyAuthMsg) Verify() (bool, error) {
    hash, err := a.Identity.Hash()
    if err != nil {
        return false, err
    }
    sig := common.FromHex(a.Signature)

    pk, err := crypto.SigToPub(hash, sig)
    if err != nil {
        return false, err
    }
    return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}

// PublicKey return public key from signature
func (a *LegacyAuthMsg) PublicKey() (string, error) {
    hash, err := a.Identity.Hash()
    if err != nil {
        return "", err
    }
    sig := common.FromHex(a.Signature)
    // recover public key
    pk, err := crypto.SigToPub(hash, sig)
    if err != nil {
        return "", err
    }
    return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *LegacyIdentity) Hash() ([]byte, error) {
    byt, err := rlp.EncodeToBytes(i)
    if err != nil {
        return nil, err
    }
    hash := crypto.Keccak256Hash(byt)
    return hash[:], nil
}

@@ -58,26 +58,6 @@ const (
    ProofTypeBatch
)

// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
    // Message fields
    Identity *Identity `json:"message"`
    // Prover signature
    Signature string `json:"signature"`
}

// Identity contains all the fields to be signed by the prover.
type Identity struct {
    // ProverName the prover name
    ProverName string `json:"prover_name"`
    // ProverVersion the prover version
    ProverVersion string `json:"prover_version"`
    // Challenge unique challenge generated by manager
    Challenge string `json:"challenge"`
}

// GenerateToken generates token
func GenerateToken() (string, error) {
    b := make([]byte, 16)

@@ -87,65 +67,6 @@ func GenerateToken() (string, error) {
    return hex.EncodeToString(b), nil
}

// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
    // Hash identity content
    hash, err := a.Identity.Hash()
    if err != nil {
        return err
    }

    // Sign register message
    sig, err := crypto.Sign(hash, priv)
    if err != nil {
        return err
    }
    a.Signature = hexutil.Encode(sig)

    return nil
}

// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
    hash, err := a.Identity.Hash()
    if err != nil {
        return false, err
    }
    sig := common.FromHex(a.Signature)

    pk, err := crypto.SigToPub(hash, sig)
    if err != nil {
        return false, err
    }
    return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}

// PublicKey return public key from signature
func (a *AuthMsg) PublicKey() (string, error) {
    hash, err := a.Identity.Hash()
    if err != nil {
        return "", err
    }
    sig := common.FromHex(a.Signature)
    // recover public key
    pk, err := crypto.SigToPub(hash, sig)
    if err != nil {
        return "", err
    }
    return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
    byt, err := rlp.EncodeToBytes(i)
    if err != nil {
        return nil, err
    }
    hash := crypto.Keccak256Hash(byt)
    return hash[:], nil
}

// ProofMsg is the data structure sent to the coordinator.
type ProofMsg struct {
    *ProofDetail `json:"zkProof"`

@@ -259,6 +180,7 @@ type ChunkInfo struct {
    WithdrawRoot common.Hash `json:"withdraw_root"`
    DataHash common.Hash `json:"data_hash"`
    IsPadding bool `json:"is_padding"`
    TxBytes []byte `json:"tx_bytes"`
}

// ChunkProof includes the proof info that are required for chunk verification and rollup.

@@ -54,7 +54,7 @@ func TestIdentityHash(t *testing.T) {
    hash, err := identity.Hash()
    assert.NoError(t, err)

    expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721"
    expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2"
    assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
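The expected hash changes because Identity.Hash is keccak256 over the struct's RLP encoding and the commit adds HardForkName as a fourth field. A sketch of the effect with hypothetical values:

package main

import (
    "fmt"

    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/rlp"
)

// the old three-field shape versus the new four-field shape (hypothetical values)
type oldIdentity struct{ ProverName, ProverVersion, Challenge string }
type newIdentity struct{ ProverName, ProverVersion, Challenge, HardForkName string }

func main() {
    oldBytes, _ := rlp.EncodeToBytes(oldIdentity{"p", "v1", "c"})
    newBytes, _ := rlp.EncodeToBytes(newIdentity{"p", "v1", "c", "bernoulli"})
    fmt.Println(crypto.Keccak256Hash(oldBytes)) // differs from the line below:
    fmt.Println(crypto.Keccak256Hash(newBytes)) // the extra field changes the RLP, hence the digest
}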

@@ -5,7 +5,7 @@ import (
    "runtime/debug"
)

var tag = "v4.3.76"
var tag = "v4.3.93"

var commit = func() string {
    if info, ok := debug.ReadBuildInfo(); ok {

@@ -5,7 +5,7 @@
  "license": "MIT",
  "scripts": {
    "test:hardhat": "npx hardhat test",
    "test:forge": "forge test -vvv",
    "test:forge": "forge test -vvv --evm-version cancun",
    "test": "yarn test:hardhat && yarn test:forge",
    "solhint": "./node_modules/.bin/solhint -f table 'src/**/*.sol'",
    "lint:sol": "./node_modules/.bin/prettier --write 'src/**/*.sol'",

@@ -2,6 +2,8 @@

pragma solidity ^0.8.24;

/// @title IScrollChain
/// @notice The interface for ScrollChain.
interface IScrollChain {
    /**********
     * Events *
@@ -43,23 +45,23 @@ interface IScrollChain {
     * Public View Functions *
     *************************/

    /// @notice The latest finalized batch index.
    /// @return The latest finalized batch index.
    function lastFinalizedBatchIndex() external view returns (uint256);

    /// @notice Return the batch hash of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The batch hash of a committed batch.
    function committedBatches(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return the state root of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The state root of a committed batch.
    function finalizedStateRoots(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return the message root of a committed batch.
    /// @param batchIndex The index of the batch.
    /// @return The message root of a committed batch.
    function withdrawRoots(uint256 batchIndex) external view returns (bytes32);

    /// @notice Return whether the batch is finalized by batch index.
    /// @param batchIndex The index of the batch.
    /// @return Whether the batch is finalized by batch index.
    function isBatchFinalized(uint256 batchIndex) external view returns (bool);

    /*****************************

@@ -8,6 +8,8 @@ import {IScrollChain} from "./IScrollChain.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";

/// @title MultipleVersionRollupVerifier
/// @notice Verifies aggregate zk proofs using the appropriate verifier.
contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /**********
     * Events *
@@ -37,7 +39,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     *************/

    /// @notice The address of ScrollChain contract.
    address immutable scrollChain;
    address public immutable scrollChain;

    /***********
     * Structs *
@@ -58,7 +60,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /// The verifiers are sorted by batchIndex in increasing order.
    mapping(uint256 => Verifier[]) public legacyVerifiers;

    /// @notice Mapping from verifier version to the lastest used zkevm verifier.
    /// @notice Mapping from verifier version to the latest used zkevm verifier.
    mapping(uint256 => Verifier) public latestVerifier;

    /***************
@@ -86,6 +88,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     *************************/

    /// @notice Return the number of legacy verifiers.
    /// @param _version The version of legacy verifiers.
    /// @return The number of legacy verifiers.
    function legacyVerifiersLength(uint256 _version) external view returns (uint256) {
        return legacyVerifiers[_version].length;
    }
@@ -93,6 +97,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
    /// @notice Compute the verifier should be used for specific batch.
    /// @param _version The version of verifier to query.
    /// @param _batchIndex The batch index to query.
    /// @return The address of verifier.
    function getVerifier(uint256 _version, uint256 _batchIndex) public view returns (address) {
        // Normally, we will use the latest verifier.
        Verifier memory _verifier = latestVerifier[_version];
@@ -144,6 +149,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     ************************/

    /// @notice Update the address of zkevm verifier.
    /// @param _version The version of the verifier.
    /// @param _startBatchIndex The start batch index when the verifier will be used.
    /// @param _verifier The address of new verifier.
    function updateVerifier(

@@ -115,11 +115,11 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     *************/

    /// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
    address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
    address private constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);

    /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
    /// point evaluation precompile
    uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
    uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;

    /// @notice The chain id of the corresponding layer 2 chain.
    uint64 public immutable layer2ChainId;
@@ -236,6 +236,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     *****************************/

    /// @notice Import layer 2 genesis block
    /// @param _batchHeader The header of the genesis batch.
    /// @param _stateRoot The state root of the genesis block.
    function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
        // check genesis batch header length
        if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();
@@ -475,7 +477,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
                _postStateRoot,
                _withdrawRoot,
                _dataHash,
                _blobDataProof[0:64]
                _blobDataProof[0:64],
                _blobVersionedHash
            )
        );

@@ -877,7 +880,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
                uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr);
                if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
                unchecked {
                    _totalTransactionsInChunk += dataPtr - startPtr; // number of non-skipped l1 messages
                    _totalTransactionsInChunk += (dataPtr - startPtr) / 32; // number of non-skipped l1 messages
                    _totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs
                    _totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
                    _totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
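The corrected line divides the byte span by 32; assuming (as the surrounding codec suggests) that each non-skipped L1 message contributes one 32-byte hash to the region between startPtr and dataPtr, the old code over-counted the message count by a factor of 32. In Go terms:

// nonSkippedL1Messages recovers a message count from a byte span, assuming
// the buffer holds one 32-byte hash per non-skipped L1 message.
func nonSkippedL1Messages(startPtr, dataPtr uint64) uint64 {
    return (dataPtr - startPtr) / 32
}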

@@ -2,6 +2,8 @@

pragma solidity ^0.8.24;

/// @title IRollupVerifier
/// @notice The interface for rollup verifier.
interface IRollupVerifier {
    /// @notice Verify aggregate zk proof.
    /// @param batchIndex The batch index to verify.

@@ -199,7 +199,7 @@ library PatriciaMerkleTrieVerifier {
            }

            // decodes all RLP encoded data and stores their DATA items
            // [length - 128 bits | calldata offset - 128 bits] in a continous memory region.
            // [length - 128 bits | calldata offset - 128 bits] in a continuous memory region.
            // Expects that the RLP starts with a list that defines the length
            // of the whole RLP region.
            function decodeFlat(_ptr) -> ptr, memStart, nItems, hash {
@@ -505,7 +505,7 @@ library PatriciaMerkleTrieVerifier {
            }

            // the one and only boundary check
            // in case an attacker crafted a malicous payload
            // in case an attacker crafted a malicious payload
            // and succeeds in the prior verification steps
            // then this should catch any bogus accesses
            if iszero(eq(ptr, add(proof.offset, proof.length))) {

@@ -83,6 +83,8 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
    }

    function testTransferUSDCRoles(address owner) external {
        hevm.assume(owner != address(0));

        // non-whitelisted caller call, should revert
        hevm.expectRevert("only circle caller");
        gateway.transferUSDCRoles(owner);

@@ -38,7 +38,7 @@ make lint

## Configure

The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManager` in [`config/config.go`](config/config.go) for more details.
The coordinator behavior can be configured using [`conf/config.json`](conf/config.json). Check the code comments under `ProverManager` in [`internal/config/config.go`](internal/config/config.go) for more details.

## Start

@@ -12,11 +12,11 @@ import (

    "github.com/scroll-tech/go-ethereum/params"

    coordinatorConfig "scroll-tech/coordinator/internal/config"

    "scroll-tech/common/cmd"
    "scroll-tech/common/docker"
    "scroll-tech/common/testcontainers"
    "scroll-tech/common/utils"

    coordinatorConfig "scroll-tech/coordinator/internal/config"
)

var (
@@ -28,7 +28,7 @@ type CoordinatorApp struct {
    Config *coordinatorConfig.Config
    ChainConfig *params.ChainConfig

    base *docker.App
    testApps *testcontainers.TestcontainerApps

    configOriginFile string
    chainConfigOriginFile string
@@ -37,17 +37,17 @@ type CoordinatorApp struct {
    HTTPPort int64

    args []string
    docker.AppAPI
    *cmd.Cmd
}

// NewCoordinatorApp return a new coordinatorApp manager.
func NewCoordinatorApp(base *docker.App, configFile string, chainConfigFile string) *CoordinatorApp {
    coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", base.Timestamp)
    genesisFile := fmt.Sprintf("/tmp/%d_genesis.json", base.Timestamp)
func NewCoordinatorApp(testApps *testcontainers.TestcontainerApps, configFile string, chainConfigFile string) *CoordinatorApp {
    coordinatorFile := fmt.Sprintf("/tmp/%d_coordinator-config.json", testApps.Timestamp)
    genesisFile := fmt.Sprintf("/tmp/%d_genesis.json", testApps.Timestamp)
    port, _ := rand.Int(rand.Reader, big.NewInt(2000))
    httpPort := port.Int64() + httpStartPort
    coordinatorApp := &CoordinatorApp{
        base: base,
        testApps: testApps,
        configOriginFile: configFile,
        chainConfigOriginFile: chainConfigFile,
        coordinatorFile: coordinatorFile,
@@ -63,14 +63,14 @@ func NewCoordinatorApp(base *docker.App, configFile string, chainConfigFile stri

// RunApp run coordinator-test child process by multi parameters.
func (c *CoordinatorApp) RunApp(t *testing.T, args ...string) {
    c.AppAPI = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
    c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
    c.Cmd = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
    c.Cmd.RunApp(func() bool { return c.Cmd.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
}

// Free stop and release coordinator-test.
func (c *CoordinatorApp) Free() {
    if !utils.IsNil(c.AppAPI) {
        c.AppAPI.WaitExit()
    if !utils.IsNil(c.Cmd) {
        c.Cmd.WaitExit()
    }
    _ = os.Remove(c.coordinatorFile)
}
@@ -82,7 +82,6 @@ func (c *CoordinatorApp) HTTPEndpoint() string {

// MockConfig creates a new coordinator config.
func (c *CoordinatorApp) MockConfig(store bool) error {
    base := c.base
    cfg, err := coordinatorConfig.NewConfig(c.configOriginFile)
    if err != nil {
        return err
@@ -97,7 +96,11 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
        MaxVerifierWorkers: 4,
        MinProverVersion: "v1.0.0",
    }
    cfg.DB.DSN = base.DBImg.Endpoint()
    endpoint, err := c.testApps.GetDBEndPoint()
    if err != nil {
        return err
    }
    cfg.DB.DSN = endpoint
    cfg.L2.ChainID = 111
    cfg.Auth.ChallengeExpireDurationSec = 1
    cfg.Auth.LoginExpireDurationSec = 1

@@ -5,6 +5,7 @@
    "batch_collection_time_sec": 180,
    "chunk_collection_time_sec": 180,
    "verifier": {
        "fork_name": "bernoulli",
        "mock_mode": true,
        "params_path": "",
        "assets_path": ""

@@ -7,7 +7,7 @@ require (
    github.com/gin-gonic/gin v1.9.1
    github.com/go-resty/resty/v2 v2.7.0
    github.com/mitchellh/mapstructure v1.5.0
    github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935
    github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
    github.com/shopspring/decimal v1.3.1
    github.com/stretchr/testify v1.9.0
    github.com/urfave/cli/v2 v2.25.7

@@ -173,8 +173,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935 h1:bHBt6sillaT4o/9RjxkVX8pWwvEmu37uWBw4XbCjfzY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

@@ -50,6 +50,7 @@ type Config struct {

// VerifierConfig load zk verifier config.
type VerifierConfig struct {
    ForkName string `json:"fork_name"`
    MockMode bool `json:"mock_mode"`
    ParamsPath string `json:"params_path"`
    AssetsPath string `json:"assets_path"`

@@ -53,25 +53,44 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
        return jwt.MapClaims{}
    }

    // recover the public key
    authMsg := message.AuthMsg{
        Identity: &message.Identity{
            Challenge: v.Message.Challenge,
            ProverName: v.Message.ProverName,
            ProverVersion: v.Message.ProverVersion,
        },
        Signature: v.Signature,
    var publicKey string
    var err error
    if v.Message.HardForkName != "" {
        authMsg := message.AuthMsg{
            Identity: &message.Identity{
                Challenge: v.Message.Challenge,
                ProverName: v.Message.ProverName,
                ProverVersion: v.Message.ProverVersion,
                HardForkName: v.Message.HardForkName,
            },
            Signature: v.Signature,
        }
        publicKey, err = authMsg.PublicKey()
    } else {
        authMsg := message.LegacyAuthMsg{
            Identity: &message.LegacyIdentity{
                Challenge: v.Message.Challenge,
                ProverName: v.Message.ProverName,
                ProverVersion: v.Message.ProverVersion,
            },
            Signature: v.Signature,
        }
        publicKey, err = authMsg.PublicKey()
    }

    publicKey, err := authMsg.PublicKey()
    if err != nil {
        return jwt.MapClaims{}
    }

    if v.Message.HardForkName == "" {
        v.Message.HardForkName = "shanghai"
    }

    return jwt.MapClaims{
        types.PublicKey: publicKey,
        types.ProverName: v.Message.ProverName,
        types.ProverVersion: v.Message.ProverVersion,
        types.HardForkName: v.Message.HardForkName,
    }
}
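A note on the branch above: a signature commits to the exact RLP encoding that was signed, so a legacy three-field signature can only be recovered against LegacyIdentity, while messages carrying HardForkName must be recovered against the four-field Identity. The empty-name fallback amounts to (a sketch):

// resolveHardFork mirrors the default applied above: legacy provers that
// omit the field are treated as pre-Bernoulli ("shanghai") provers.
func resolveHardFork(hardForkName string) string {
    if hardForkName == "" {
        return "shanghai"
    }
    return hardForkName
}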
|
||||
|
||||
@@ -89,5 +108,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
|
||||
if proverVersion, ok := claims[types.ProverVersion]; ok {
|
||||
c.Set(types.ProverVersion, proverVersion)
|
||||
}
|
||||
|
||||
if hardForkName, ok := claims[types.HardForkName]; ok {
|
||||
c.Set(types.HardForkName, hardForkName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package api
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -25,6 +26,8 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D
|
||||
panic("proof receiver new verifier failure")
|
||||
}
|
||||
|
||||
log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap)
|
||||
|
||||
Auth = NewAuthController(db)
|
||||
GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg)
|
||||
SubmitProof = NewSubmitProofController(cfg, db, vf, reg)
|
||||
|
||||
@@ -6,6 +6,8 @@ import (

"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"

@@ -21,15 +23,21 @@ import (
// GetTaskController the get prover task api controller
type GetTaskController struct {
proverTasks map[message.ProofType]provertask.ProverTask

getTaskAccessCounter *prometheus.CounterVec
}

// NewGetTaskController create a get prover task controller
func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController {
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVK, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVK, reg)
chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg)
batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg)

ptc := &GetTaskController{
proverTasks: make(map[message.ProofType]provertask.ProverTask),
getTaskAccessCounter: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_get_task_access_count",
Help: "Multi dimensions get task counter.",
}, []string{coordinatorType.LabelProverName, coordinatorType.LabelProverPublicKey, coordinatorType.LabelProverVersion}),
}

ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask
@@ -38,6 +46,28 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *
return ptc
}

func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return fmt.Errorf("get public key from context failed")
}
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return fmt.Errorf("get prover name from context failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return fmt.Errorf("get prover version from context failed")
}

ptc.getTaskAccessCounter.With(prometheus.Labels{
coordinatorType.LabelProverPublicKey: publicKey.(string),
coordinatorType.LabelProverName: proverName.(string),
coordinatorType.LabelProverVersion: proverVersion.(string),
}).Inc()
return nil
}

// GetTasks get assigned chunk/batch task
func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
var getTaskParameter coordinatorType.GetTaskParameter
@@ -55,6 +85,10 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
return
}

if err := ptc.incGetTaskAccessCounter(ctx); err != nil {
log.Warn("get_task access counter inc failed", "error", err.Error())
}

result, err := proverTask.Assign(ctx, &getTaskParameter)
if err != nil {
nerr := fmt.Errorf("return prover task err:%w", err)
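The new getTaskAccessCounter tracks get_task calls per prover identity. A self-contained sketch of the same CounterVec wiring against a private registry; the literal label names mirror the LabelProver* constants added in metric.go further down:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	// Same shape as getTaskAccessCounter above.
	accessCounter := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
		Name: "coordinator_get_task_access_count",
		Help: "Multi dimensions get task counter.",
	}, []string{"prover_name", "prover_pubkey", "prover_version"})

	accessCounter.With(prometheus.Labels{
		"prover_name":    "prover_test0",
		"prover_pubkey":  "0xabc",
		"prover_version": "v4.1.98",
	}).Inc()

	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), "series:", len(mf.GetMetric()))
	}
}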
@@ -31,16 +31,17 @@ type BatchProverTask struct {

batchAttemptsExceedTotal prometheus.Counter
batchTaskGetTaskTotal *prometheus.CounterVec
batchTaskGetTaskProver *prometheus.CounterVec
}

// NewBatchProverTask new a batch collector
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *BatchProverTask {
func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)

bp := &BatchProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
vkMap: vkMap,
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -58,6 +59,7 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
Name: "coordinator_batch_get_task_total",
Help: "Total number of batch get task.",
}, []string{"fork_name"}),
batchTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "batch"),
}
return bp
}
@@ -69,9 +71,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}

hardForkNumber, err := bp.getHardForkNumberByName(getTaskParameter.HardForkName)
hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
return nil, err
}

@@ -83,7 +85,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
if fromBlockNum != 0 {
startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, fromBlockNum)
if chunkErr != nil {
log.Error("failed to get fork start chunk index", "forkName", getTaskParameter.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if startChunk == nil {
@@ -93,8 +95,8 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
if toBlockNum != math.MaxInt64 {
toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, toBlockNum)
if err != nil {
log.Error("failed to get fork end chunk index", "forkName", getTaskParameter.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
if chunkErr != nil {
log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
return nil, ErrCoordinatorInternalFailure
}
if toChunk != nil {
@@ -179,7 +181,12 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}

bp.batchTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
bp.batchTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
}).Inc()

return taskMsg, nil
}
@@ -209,6 +216,9 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
DataHash: common.HexToHash(chunk.Hash),
IsPadding: false,
}
if proof.ChunkInfo != nil {
chunkInfo.TxBytes = proof.ChunkInfo.TxBytes
}
chunkInfos = append(chunkInfos, &chunkInfo)
}

@@ -29,15 +29,16 @@ type ChunkProverTask struct {

chunkAttemptsExceedTotal prometheus.Counter
chunkTaskGetTaskTotal *prometheus.CounterVec
chunkTaskGetTaskProver *prometheus.CounterVec
}

// NewChunkProverTask new a chunk prover task
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask {
func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask {
forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg)
log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap)
cp := &ChunkProverTask{
BaseProverTask: BaseProverTask{
vk: vk,
vkMap: vkMap,
db: db,
cfg: cfg,
nameForkMap: nameForkMap,
@@ -55,6 +56,7 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
Name: "coordinator_chunk_get_task_total",
Help: "Total number of chunk get task.",
}, []string{"fork_name"}),
chunkTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "chunk"),
}
return cp
}
@@ -66,9 +68,9 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
}

hardForkNumber, err := cp.getHardForkNumberByName(getTaskParameter.HardForkName)
hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
if err != nil {
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName)
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
return nil, err
}

@@ -151,7 +153,12 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, ErrCoordinatorInternalFailure
}

cp.chunkTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc()
cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
coordinatorType.LabelProverName: proverTask.ProverName,
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
coordinatorType.LabelProverVersion: proverTask.ProverVersion,
}).Inc()

return taskMsg, nil
}
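Both collector constructors now take a vkMap keyed by fork name and derive nameForkMap from the chain config via forks.CollectSortedForkHeights. A rough stand-in for that mapping step, assuming only the scroll fork fields on params.ChainConfig that the test changes below actually set (ShanghaiBlock, BernoulliBlock, LondonBlock); the real helper also returns sorted heights:

package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/params"
)

// collectNameForkMap is a simplified sketch of what
// forks.CollectSortedForkHeights produces for the name->height map.
func collectNameForkMap(cfg *params.ChainConfig) map[string]uint64 {
	m := make(map[string]uint64)
	if cfg.ShanghaiBlock != nil {
		m["shanghai"] = cfg.ShanghaiBlock.Uint64()
	}
	if cfg.LondonBlock != nil {
		m["london"] = cfg.LondonBlock.Uint64()
	}
	if cfg.BernoulliBlock != nil {
		m["bernoulli"] = cfg.BernoulliBlock.Uint64()
	}
	return m
}

func main() {
	cfg := &params.ChainConfig{
		LondonBlock:    big.NewInt(3),
		BernoulliBlock: big.NewInt(4),
	}
	fmt.Println(collectNameForkMap(cfg))
}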
@@ -2,8 +2,12 @@ package provertask

import (
"fmt"
"sync"

"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"

"scroll-tech/common/version"
@@ -13,11 +17,12 @@ import (
coordinatorType "scroll-tech/coordinator/internal/types"
)

// ErrCoordinatorInternalFailure coordinator internal db failure
var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")

// ErrHardForkName indicates client request with the wrong hard fork name
var ErrHardForkName = fmt.Errorf("wrong hard fork name")
var (
// ErrCoordinatorInternalFailure coordinator internal db failure
ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
// ErrHardForkName indicates client request with the wrong hard fork name
ErrHardForkName = fmt.Errorf("wrong hard fork name")
)

// ProverTask the interface of a collector who send data to prover
type ProverTask interface {
@@ -28,8 +33,8 @@ type ProverTask interface {
type BaseProverTask struct {
cfg *config.Config
db *gorm.DB
vk string

vkMap map[string]string
nameForkMap map[string]uint64
forkHeights []uint64

@@ -44,6 +49,7 @@ type proverTaskContext struct {
PublicKey string
ProverName string
ProverVersion string
HardForkName string
}

// checkParameter check the prover task parameter illegal
@@ -68,12 +74,24 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
}
ptc.ProverVersion = proverVersion.(string)

hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, fmt.Errorf("get hard fork name from context failed")
}
ptc.HardForkName = hardForkName.(string)

if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
}

vk, vkExist := b.vkMap[ptc.HardForkName]
if !vkExist {
return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
}

// if the prover has a different vk
if getTaskParameter.VK != b.vk {
if getTaskParameter.VK != vk {
log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
// if the prover reports a different prover version
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
@@ -115,3 +133,22 @@ func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error

return hardForkNumber, nil
}

var (
getTaskCounterInitOnce sync.Once
getTaskCounterVec *prometheus.CounterVec = nil
)

func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
getTaskCounterInitOnce.Do(func() {
getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{
Name: "coordinator_get_task_count",
Help: "Multi dimensions get task counter.",
}, []string{"task_type",
coordinatorType.LabelProverName,
coordinatorType.LabelProverPublicKey,
coordinatorType.LabelProverVersion})
})

return getTaskCounterVec.MustCurryWith(prometheus.Labels{"task_type": taskType})
}
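newGetTaskCounterVec guards registration with sync.Once so the chunk and batch collectors share one metric family and the second call does not panic on duplicate registration; each caller then gets a view curried with its own task_type. A runnable sketch of the same pattern with a trimmed label set:

package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	getTaskCounterInitOnce sync.Once
	getTaskCounterVec      *prometheus.CounterVec
)

// Register the vector once; every later caller gets a curried view that
// pins its own task_type label.
func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec {
	getTaskCounterInitOnce.Do(func() {
		getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{
			Name: "coordinator_get_task_count",
			Help: "Multi dimensions get task counter.",
		}, []string{"task_type", "prover_name"})
	})
	return getTaskCounterVec.MustCurryWith(prometheus.Labels{"task_type": taskType})
}

func main() {
	reg := prometheus.NewRegistry()
	chunk := newGetTaskCounterVec(promauto.With(reg), "chunk")
	batch := newGetTaskCounterVec(promauto.With(reg), "batch")
	chunk.WithLabelValues("prover_a").Inc()
	batch.WithLabelValues("prover_b").Inc()

	mfs, _ := reg.Gather()
	fmt.Println("metric families registered:", len(mfs)) // 1: both share one family
}

One consequence of the Once guard: the factory passed on the first call wins, so both collectors must be constructed against the same Registerer, which NewGetTaskController above guarantees.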
@@ -134,6 +134,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
if len(pv) == 0 {
return fmt.Errorf("get ProverVersion from context failed")
}
hardForkName := ctx.GetString(coordinatorType.HardForkName)

var proverTask *orm.ProverTask
var err error
@@ -156,20 +157,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
proofTimeSec := uint64(proofTime.Seconds())

log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec)
"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)

if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil {
if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
return err
}

m.verifierTotal.WithLabelValues(pv).Inc()

var success bool
success := true
var verifyErr error
if proofMsg.Type == message.ProofTypeChunk {
success, verifyErr = m.verifier.VerifyChunkProof(proofMsg.ChunkProof)
} else if proofMsg.Type == message.ProofTypeBatch {
success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof)
// only verify batch proof. chunk proof verifier have been disabled after Bernoulli
if proofMsg.Type == message.ProofTypeBatch {
success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName)
}

if verifyErr != nil || !success {
@@ -178,7 +178,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)

log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
"prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)

if verifyErr != nil {
return ErrValidatorFailureVerifiedFailed
@@ -189,7 +189,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())

log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)

if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
m.proofSubmitFailure.Inc()
@@ -221,7 +221,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
return nil
}

func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) {
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) {
defer func() {
if err != nil {
m.validateFailureTotal.Inc()
@@ -240,7 +240,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
"cannot submit valid proof for a prover task twice",
"taskType", proverTask.TaskType, "hash", proofMsg.ID,
"proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion,
"proverPublicKey", proverTask.ProverPublicKey,
"proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName,
)
return ErrValidatorFailureProverTaskCannotSubmitTwice
}
@@ -259,7 +259,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
log.Info("proof generated by prover failed",
"taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName,
"proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType,
"failureMessage", failureMsg)
"failureMessage", failureMsg, "forkName", forkName)
return ErrValidatorFailureProofMsgStatusNotOk
}

@@ -267,13 +267,13 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
m.validateFailureProverTaskTimeout.Inc()
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType,
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec)
"proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName)
return ErrValidatorFailureProofTimeout
}

// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk,
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr)
}

@@ -281,7 +281,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
m.validateFailureProverTaskHaveVerifier.Inc()
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk)
"taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil
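Every log call in the receiver now carries the fork name. go-ethereum's logger takes alternating key/value pairs, so the change is one extra pair per call site; a short runnable sketch (handler setup copied from the coordinator test changes further down):

package main

import (
	"os"

	"github.com/scroll-tech/go-ethereum/log"
)

func main() {
	// Same handler wiring the tests below use.
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	// Alternating key/value pairs; adding the fork name is one extra pair.
	log.Info("handling zk proof", "proofID", "task-1", "hardForkName", "bernoulli")
}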
BIN coordinator/internal/logic/verifier/legacy_vk/agg_vk.vkey (Normal file, binary file not shown)
BIN coordinator/internal/logic/verifier/legacy_vk/chunk_vk.vkey (Normal file, binary file not shown)
@@ -9,8 +9,26 @@ import (
)

// NewVerifier Sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
return &Verifier{}, nil
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
batchVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
chunkVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}

// VerifyChunkProof return a mock verification result for a ChunkProof.
@@ -22,7 +40,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) {
}

// VerifyBatchProof return a mock verification result for a BatchProof.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if string(proof.Proof) == InvalidTestProof {
return false, nil
}
@@ -9,7 +9,7 @@ const InvalidTestProof = "this is a invalid proof"

// Verifier represents a rust ffi to a halo2 verifier.
type Verifier struct {
cfg *config.VerifierConfig
BatchVK string
ChunkVK string
cfg *config.VerifierConfig
ChunkVKMap map[string]string
BatchVKMap map[string]string
}
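The single BatchVK/ChunkVK strings become per-fork maps. A minimal sketch of the lookup the coordinator performs when a prover requests a task for a given fork; the values are base64-encoded vkeys in the real code, plain placeholders here:

package main

import "fmt"

// Trimmed sketch of the fork-keyed verifier shape above.
type Verifier struct {
	ChunkVKMap map[string]string
	BatchVKMap map[string]string
}

func (v *Verifier) batchVKFor(forkName string) (string, error) {
	vk, ok := v.BatchVKMap[forkName]
	if !ok {
		return "", fmt.Errorf("can't get vk for hard fork:%s", forkName)
	}
	return vk, nil
}

func main() {
	v := &Verifier{BatchVKMap: map[string]string{
		"shanghai":  "vk-shanghai",
		"bernoulli": "vk-bernoulli",
	}}
	for _, fork := range []string{"bernoulli", "banach"} {
		vk, err := v.batchVKFor(fork)
		fmt.Println(fork, vk, err)
	}
}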
@@ -11,9 +11,11 @@ package verifier
import "C" //nolint:typecheck

import (
"embed"
"encoding/base64"
"encoding/json"
"io"
"io/fs"
"os"
"path"
"unsafe"
@@ -28,7 +30,26 @@ import (
// NewVerifier Sets up a rust ffi to call verify.
func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
if cfg.MockMode {
return &Verifier{cfg: cfg}, nil
batchVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}
chunkVKMap := map[string]string{
"shanghai": "",
"bernoulli": "",
"london": "",
"istanbul": "",
"homestead": "",
"eip155": "",
}

batchVKMap[cfg.ForkName] = ""
chunkVKMap[cfg.ForkName] = ""
return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil
}
paramsPathStr := C.CString(cfg.ParamsPath)
assetsPathStr := C.CString(cfg.AssetsPath)
@@ -40,25 +61,31 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
C.init_batch_verifier(paramsPathStr, assetsPathStr)
C.init_chunk_verifier(paramsPathStr, assetsPathStr)

batchVK, err := readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
v := &Verifier{
cfg: cfg,
ChunkVKMap: make(map[string]string),
BatchVKMap: make(map[string]string),
}

batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey"))
if err != nil {
return nil, err
}

chunkVK, err := readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey"))
if err != nil {
return nil, err
}
v.BatchVKMap[cfg.ForkName] = batchVK
v.ChunkVKMap[cfg.ForkName] = chunkVK

return &Verifier{
cfg: cfg,
BatchVK: batchVK,
ChunkVK: chunkVK,
}, nil
if err := v.loadEmbedVK(); err != nil {
return nil, err
}
return v, nil
}

// VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier.
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) {
if v.cfg.MockMode {
log.Info("Mock mode, batch verifier disabled")
if string(proof.Proof) == InvalidTestProof {
@@ -72,13 +99,15 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) {
return false, err
}

log.Info("Start to verify batch proof", "forkName", forkName)
proofStr := C.CString(string(buf))
forkNameStr := C.CString(forkName)
defer func() {
C.free(unsafe.Pointer(proofStr))
C.free(unsafe.Pointer(forkNameStr))
}()

log.Info("Start to verify batch proof ...")
verified := C.verify_batch_proof(proofStr)
verified := C.verify_batch_proof(proofStr, forkNameStr)
return verified != 0, nil
}
|
||||
return verified != 0, nil
|
||||
}
|
||||
|
||||
func readVK(filePat string) (string, error) {
|
||||
func (v *Verifier) readVK(filePat string) (string, error) {
|
||||
f, err := os.Open(filePat)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -118,3 +147,26 @@ func readVK(filePat string) (string, error) {
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(byt), nil
|
||||
}
|
||||
|
||||
//go:embed legacy_vk/*
|
||||
var legacyVKFS embed.FS
|
||||
|
||||
func (v *Verifier) loadEmbedVK() error {
|
||||
batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
|
||||
if err != nil {
|
||||
log.Error("load embed batch vk failure", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey")
|
||||
if err != nil {
|
||||
log.Error("load embed chunk vk failure", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
|
||||
v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
|
||||
v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
|
||||
v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
|
||||
return nil
|
||||
}
|
||||
|
||||
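loadEmbedVK compiles the legacy shanghai vkeys (the new binary files above) into the coordinator with go:embed, and also registers them under the empty fork name so pre-fork provers that send no fork name still match a vk. A minimal sketch of the embed-and-encode step, assuming a legacy_vk directory with agg_vk.vkey sits next to the source file at build time:

package main

import (
	"embed"
	"encoding/base64"
	"fmt"
	"io/fs"
)

// The embed directive resolves at compile time, so the directory must exist
// relative to this file.
//
//go:embed legacy_vk/*
var legacyVKFS embed.FS

func main() {
	byt, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey")
	if err != nil {
		panic(err)
	}
	vk := base64.StdEncoding.EncodeToString(byt)
	fmt.Println("embedded batch vk, base64 length:", len(vk))
}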
@@ -14,7 +14,6 @@ import (
"scroll-tech/common/types/message"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/logic/verifier"
)

var (
@@ -34,7 +33,7 @@ func TestFFI(t *testing.T) {
AssetsPath: *assetsPath,
}

v, err := verifier.NewVerifier(cfg)
v, err := NewVerifier(cfg)
as.NoError(err)

chunkProof1 := readChunkProof(*chunkProofPath1, as)
@@ -50,7 +49,7 @@ func TestFFI(t *testing.T) {
t.Log("Verified chunk proof 2")

batchProof := readBatchProof(*batchProofPath, as)
batchOk, err := v.VerifyBatchProof(batchProof)
batchOk, err := v.VerifyBatchProof(batchProof, "bernoulli")
as.NoError(err)
as.True(batchOk)
t.Log("Verified batch proof")
@@ -24,6 +24,7 @@ type Batch struct {
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -54,6 +55,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

// blob
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`

// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -73,22 +78,16 @@ func (*Batch) TableName() string {
// GetUnassignedBatch retrieves unassigned batch based on the specified limit.
// The returned batch are sorted in ascending order by their index.
func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
db = db.Where("start_chunk_index >= ?", startChunkIndex)
db = db.Where("end_chunk_index < ?", endChunkIndex)

var batch Batch
err := db.First(&batch).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}

db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
err := db.Raw(sql).Scan(&batch).Error
if err != nil {
return nil, fmt.Errorf("Batch.GetUnassignedBatches error: %w", err)
return nil, fmt.Errorf("Batch.GetUnassignedBatch error: %w", err)
}
if batch.Hash == "" {
return nil, nil
}
return &batch, nil
}
@@ -96,22 +95,16 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChun
// GetAssignedBatch retrieves assigned batch based on the specified limit.
// The returned batch are sorted in ascending order by their index.
func (o *Batch) GetAssignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
db := o.db.WithContext(ctx)
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
db = db.Where("start_chunk_index >= ?", startChunkIndex)
db = db.Where("end_chunk_index < ?", endChunkIndex)

var batch Batch
err := db.First(&batch).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}

db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM batch WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND chunk_proofs_status = %d AND start_chunk_index >= %d AND end_chunk_index < %d AND batch.deleted_at IS NULL ORDER BY batch.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, int(types.ChunkProofsStatusReady), startChunkIndex, endChunkIndex)
err := db.Raw(sql).Scan(&batch).Error
if err != nil {
return nil, fmt.Errorf("Batch.GetAssignedBatches error: %w", err)
return nil, fmt.Errorf("Batch.GetAssignedBatch error: %w", err)
}
if batch.Hash == "" {
return nil, nil
}
return &batch, nil
}
@@ -260,6 +253,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
newBatch := Batch{
Index: batch.Index,
Hash: daBatch.Hash().Hex(),
DataHash: daBatch.DataHash.Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: endDAChunkHash.Hex(),
@@ -274,6 +268,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
ActiveAttempts: 0,
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
BlobDataProof: nil, // using mock value because this piece of codes is only used in unit tests
BlobSize: 0, // using mock value because this piece of codes is only used in unit tests
}

db := o.db
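The ORM getters switch from chained Where/First calls to a raw SELECT. One behavioral difference worth noting: Raw(...).Scan returns a nil error and leaves the struct zero-valued on an empty result instead of yielding gorm.ErrRecordNotFound, which is why the getters now test batch.Hash == "" (and chunk.Hash == "" below) as the not-found sentinel. A small sketch of that behavior against an in-memory SQLite database (trimmed hypothetical model, real code targets Postgres):

package main

import (
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// Trimmed stand-in for the Batch model.
type Batch struct {
	Hash string
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&Batch{}); err != nil {
		panic(err)
	}

	var batch Batch
	// On an empty result Raw(...).Scan reports no error and leaves the
	// struct zero-valued; there is no gorm.ErrRecordNotFound to check.
	err = db.Raw("SELECT * FROM batches LIMIT 1;").Scan(&batch).Error
	fmt.Println("err:", err, "empty sentinel:", batch.Hash == "")
}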
@@ -48,6 +48,10 @@ type Chunk struct {
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

// blob
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`

// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -71,22 +75,16 @@ func (*Chunk) TableName() string {
// GetUnassignedChunk retrieves unassigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("start_block_number >= ?", fromBlockNum)
db = db.Where("end_block_number < ?", toBlockNum)

var chunk Chunk
err := db.First(&chunk).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}

db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskUnassigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
err := db.Raw(sql).Scan(&chunk).Error
if err != nil {
return nil, fmt.Errorf("Chunk.GetUnassignedChunks error: %w", err)
return nil, fmt.Errorf("Chunk.GetUnassignedChunk error: %w", err)
}
if chunk.Hash == "" {
return nil, nil
}
return &chunk, nil
}
@@ -94,22 +92,16 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, fromBlockNum, toBlockNum
// GetAssignedChunk retrieves assigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetAssignedChunk(ctx context.Context, fromBlockNum, toBlockNum uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("start_block_number >= ?", fromBlockNum)
db = db.Where("end_block_number < ?", toBlockNum)

var chunk Chunk
err := db.First(&chunk).Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}

db := o.db.WithContext(ctx)
sql := fmt.Sprintf("SELECT * FROM chunk WHERE proving_status = %d AND total_attempts < %d AND active_attempts < %d AND start_block_number >= %d AND end_block_number < %d AND chunk.deleted_at IS NULL ORDER BY chunk.index LIMIT 1;",
int(types.ProvingTaskAssigned), maxTotalAttempts, maxActiveAttempts, fromBlockNum, toBlockNum)
err := db.Raw(sql).Scan(&chunk).Error
if err != nil {
return nil, fmt.Errorf("Chunk.GetAssignedChunks error: %w", err)
return nil, fmt.Errorf("Chunk.GetAssignedChunk error: %w", err)
}
if chunk.Hash == "" {
return nil, nil
}
return &chunk, nil
}
@@ -312,6 +304,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
ProvingStatus: int16(types.ProvingTaskUnassigned),
TotalAttempts: 0,
ActiveAttempts: 0,
CrcMax: 0, // using mock value because this piece of codes is only used in unit tests
BlobSize: 0, // using mock value because this piece of codes is only used in unit tests
}

db := o.db
@@ -9,41 +9,36 @@ import (
"github.com/stretchr/testify/assert"
"gorm.io/gorm"

"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"

"scroll-tech/database/migrate"
)

var (
base *docker.App

testApps *testcontainers.TestcontainerApps
db *gorm.DB
proverTaskOrm *ProverTask
)

func TestMain(m *testing.M) {
t := &testing.T{}
setupEnv(t)
defer tearDownEnv(t)
defer func() {
if testApps != nil {
testApps.Free()
}
tearDownEnv(t)
}()
m.Run()
}

func setupEnv(t *testing.T) {
base = docker.NewDockerApp()
base.RunDBImage(t)
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())

var err error
db, err = database.InitDB(
&database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
},
)
db, err = testApps.GetGormDBClient()
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
@@ -56,10 +51,11 @@ func tearDownEnv(t *testing.T) {
sqlDB, err := db.DB()
assert.NoError(t, err)
sqlDB.Close()
base.Free()
}

func TestProverTaskOrm(t *testing.T) {
setupEnv(t)

sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))
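The tests drop the shared docker.App in favor of a testcontainers wrapper. Roughly what StartPostgresContainer and GetGormDBClient wrap, sketched with the upstream testcontainers-go postgres module; the option and method names follow that module's RunContainer-era API, and the scroll wrapper's internals are assumptions:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
	pgdriver "gorm.io/driver/postgres"
	"gorm.io/gorm"
)

func main() {
	ctx := context.Background()

	// Roughly what testApps.StartPostgresContainer() wraps.
	pg, err := postgres.RunContainer(ctx,
		testcontainers.WithImage("postgres:15"),
		postgres.WithDatabase("scroll"),
		postgres.WithUsername("postgres"),
		postgres.WithPassword("postgres"),
		testcontainers.WithWaitStrategy(
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).WithStartupTimeout(time.Minute)),
	)
	if err != nil {
		panic(err)
	}
	defer pg.Terminate(ctx)

	dsn, err := pg.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		panic(err)
	}

	// Roughly what testApps.GetGormDBClient() returns.
	db, err := gorm.Open(pgdriver.Open(dsn), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	fmt.Println("gorm client ready:", db != nil)
}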
@@ -9,6 +9,8 @@ const (
ProverName = "prover_name"
// ProverVersion the prover version for context
ProverVersion = "prover_version"
// HardForkName the fork name for context
HardForkName = "hard_fork_name"
)

// Message the login message struct
@@ -16,6 +18,7 @@ type Message struct {
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
}

// LoginParameter for /login api

@@ -2,7 +2,6 @@ package types

// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
ProverHeight uint64 `form:"prover_height" json:"prover_height"`
TaskType int `form:"task_type" json:"task_type"`
VK string `form:"vk" json:"vk"`
coordinator/internal/types/metric.go (Normal file, 10 lines added)
@@ -0,0 +1,10 @@
package types

var (
// LabelProverName label name for prover name; common label name using in prometheus metrics, same rule applies to below.
LabelProverName = "prover_name"
// LabelProverPublicKey label name for prover public key
LabelProverPublicKey = "prover_pubkey"
// LabelProverVersion label name for prover version
LabelProverVersion = "prover_version"
)
@@ -21,8 +21,7 @@ import (

"scroll-tech/database/migrate"

"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/message"
@@ -43,10 +42,9 @@ const (
)

var (
dbCfg *database.Config
conf *config.Config
conf *config.Config

base *docker.App
testApps *testcontainers.TestcontainerApps

db *gorm.DB
l2BlockOrm *orm.L2Block
@@ -70,13 +68,12 @@ var (
)

func TestMain(m *testing.M) {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
log.Root().SetHandler(glogger)

base = docker.NewDockerApp()
defer func() {
if testApps != nil {
testApps.Free()
}
}()
m.Run()
base.Free()
}

func randomURL() string {
@@ -86,7 +83,8 @@ func randomURL() string {

func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, nameForkMap map[string]int64) (*cron.Collector, *http.Server) {
var err error
db, err = database.InitDB(dbCfg)
db, err = testApps.GetGormDBClient()

assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
@@ -98,8 +96,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
ChainID: 111,
},
ProverManager: &config.ProverManager{
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{MockMode: true},
ProversPerSession: proversPerSession,
Verifier: &config.VerifierConfig{
MockMode: true,
},
BatchCollectionTimeSec: 10,
ChunkCollectionTimeSec: 10,
MaxVerifierWorkers: 10,
@@ -115,8 +115,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
var chainConf params.ChainConfig
for forkName, forkNumber := range nameForkMap {
switch forkName {
case "banach":
chainConf.BanachBlock = big.NewInt(forkNumber)
case "shanghai":
chainConf.ShanghaiBlock = big.NewInt(forkNumber)
case "bernoulli":
chainConf.BernoulliBlock = big.NewInt(forkNumber)
case "london":
chainConf.LondonBlock = big.NewInt(forkNumber)
case "istanbul":
@@ -149,20 +151,18 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
}

func setEnv(t *testing.T) {
var err error

version.Version = "v4.1.98"

base = docker.NewDockerApp()
base.RunDBImage(t)
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
log.Root().SetHandler(glogger)

dbCfg = &database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
}
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())

var err error
db, err = database.InitDB(dbCfg)
db, err = testApps.GetGormDBClient()
assert.NoError(t, err)
sqlDB, err := db.DB()
assert.NoError(t, err)
@@ -199,7 +199,6 @@ func setEnv(t *testing.T) {

func TestApis(t *testing.T) {
// Set up the test environment.
base = docker.NewDockerApp()
setEnv(t)

t.Run("TestHandshake", testHandshake)
@@ -211,11 +210,6 @@ func TestApis(t *testing.T) {
t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
t.Run("TestTimeoutProof", testTimeoutProof)
t.Run("TestHardFork", testHardForkAssignTask)

// Teardown
t.Cleanup(func() {
base.Free()
})
}

func testHandshake(t *testing.T) {
@@ -268,12 +262,12 @@ func testGetTaskBlocked(t *testing.T) {
assert.NoError(t, err)

expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

expectedErr = fmt.Errorf("get empty prover task")
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

@@ -284,12 +278,12 @@ func testGetTaskBlocked(t *testing.T) {
assert.NoError(t, err)

expectedErr = fmt.Errorf("get empty prover task")
code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorEmptyProofData, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}

@@ -309,12 +303,12 @@ func testOutdatedProverVersion(t *testing.T) {
assert.True(t, chunkProver.healthCheckSuccess(t))

expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))

expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead")
assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code)
assert.Equal(t, expectedErr, fmt.Errorf(errMsg))
}
@@ -332,18 +326,18 @@ func testHardForkAssignTask(t *testing.T) {
{ // hard fork 4, prover 4 block [2-3]
name: "noTaskForkChunkProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"banach": forkNumberFour},
forkNumbers: map[string]int64{"bernoulli": forkNumberFour},
exceptTaskNumber: 0,
proverForkNames: []string{"banach", "banach"},
proverForkNames: []string{"bernoulli", "bernoulli"},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
{
name: "noTaskForkBatchProverVersionLargeOrEqualThanHardFork",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"banach": forkNumberFour},
forkNumbers: map[string]int64{"bernoulli": forkNumberFour},
exceptTaskNumber: 0,
proverForkNames: []string{"banach", "banach"},
proverForkNames: []string{"bernoulli", "bernoulli"},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
exceptGetTaskErrMsgs: []string{"get empty prover task", "get empty prover task"},
},
@@ -368,7 +362,7 @@ func testHardForkAssignTask(t *testing.T) {
{
name: "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
exceptTaskNumber: 0,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
@@ -431,7 +425,7 @@ func testHardForkAssignTask(t *testing.T) {
{ // hard fork 4, prover 3 block [2-3]
name: "twoTaskForkChunkProverVersionLessThanHardFork",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"banach": forkNumberFour, "istanbul": forkNumberTwo},
forkNumbers: map[string]int64{"bernoulli": forkNumberFour, "istanbul": forkNumberTwo},
exceptTaskNumber: 2,
proverForkNames: []string{"istanbul", "istanbul"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -458,7 +452,7 @@ func testHardForkAssignTask(t *testing.T) {
{ // hard fork 3, prover1:2 prover2:3 block [2-3]
name: "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -467,7 +461,7 @@ func testHardForkAssignTask(t *testing.T) {
{
name: "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0",
proofType: message.ProofTypeBatch,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree},
exceptTaskNumber: 2,
proverForkNames: []string{"", "london"},
exceptGetTaskErrCodes: []int{types.Success, types.Success},
@@ -476,7 +470,7 @@ func testHardForkAssignTask(t *testing.T) {
{ // hard fork 2, prover 2 block [2-3]
name: "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0",
proofType: message.ProofTypeChunk,
forkNumbers: map[string]int64{"london": forkNumberThree},
forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree},
exceptTaskNumber: 1,
proverForkNames: []string{"", ""},
exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData},
@@ -544,7 +538,7 @@ func testHardForkAssignTask(t *testing.T) {
continue
}
getTaskNumber++
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success)
mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i])
}
assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
})
@@ -587,7 +581,7 @@ func testValidProof(t *testing.T) {
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
assert.NotNil(t, proverTask)
provers[i].submitProof(t, proverTask, proofStatus, types.Success)
provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul")
}

// verify proof status
@@ -653,34 +647,21 @@ func testInvalidProof(t *testing.T) {
err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady)
assert.NoError(t, err)

// create mock provers.
provers := make([]*mockProver, 2)
for i := 0; i < len(provers); i++ {
var proofType message.ProofType
if i%2 == 0 {
proofType = message.ProofTypeChunk
} else {
proofType = message.ProofTypeBatch
}
provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul")
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure)
}
proofType := message.ProofTypeBatch
provingStatus := verifiedFailed
expectErrCode := types.ErrCoordinatorHandleZkProofFailure
prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version)
proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul")
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul")

// verify proof status
var (
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute)
)

var (
chunkProofStatus types.ProvingStatus
tick = time.Tick(1500 * time.Millisecond)
tickStop = time.Tick(time.Minute)
batchProofStatus types.ProvingStatus
chunkActiveAttempts int16
chunkMaxAttempts int16
batchActiveAttempts int16
batchMaxAttempts int16
)
@@ -688,24 +669,17 @@ func testInvalidProof(t *testing.T) {
for {
select {
case <-tick:
chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
if batchProofStatus == types.ProvingTaskAssigned {
return
}
chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(chunkMaxAttempts))
assert.Equal(t, 0, int(chunkActiveAttempts))

batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
assert.Equal(t, 1, int(batchMaxAttempts))
assert.Equal(t, 0, int(batchActiveAttempts))
case <-tickStop:
t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String())
t.Error("failed to check proof status", "batchProofStatus", batchProofStatus.String())
return
}
}
@@ -745,7 +719,7 @@ func testProofGeneratedFailed(t *testing.T) {
assert.NotNil(t, proverTask)
assert.Equal(t, errCode, types.Success)
assert.Equal(t, errMsg, "")
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure)
provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul")
}

// verify proof status
@@ -868,14 +842,14 @@ func testTimeoutProof(t *testing.T) {
assert.NotNil(t, proverChunkTask2)
assert.Equal(t, chunkTask2ErrCode, types.Success)
assert.Equal(t, chunkTask2ErrMsg, "")
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success)
chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul")

batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version)
proverBatchTask2, batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul")
assert.NotNil(t, proverBatchTask2)
assert.Equal(t, batchTask2ErrCode, types.Success)
assert.Equal(t, batchTask2ErrMsg, "")
batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul")

// verify proof status, it should be verified now, because second prover sent valid proof
chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash)
@@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof
|
||||
}
|
||||
|
||||
// connectToCoordinator sets up a websocket client to connect to the prover manager.
|
||||
func (r *mockProver) connectToCoordinator(t *testing.T) string {
|
||||
func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string {
|
||||
challengeString := r.challenge(t)
|
||||
return r.login(t, challengeString)
|
||||
return r.login(t, challengeString, forkName)
|
||||
}
|
||||
|
||||
func (r *mockProver) challenge(t *testing.T) string {
|
||||
@@ -76,18 +76,32 @@ func (r *mockProver) challenge(t *testing.T) string {
|
||||
return loginData.Token
|
||||
}
|
||||
|
||||
func (r *mockProver) login(t *testing.T, challengeString string) string {
|
||||
authMsg := message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Challenge: challengeString,
|
||||
ProverName: r.proverName,
|
||||
ProverVersion: r.proverVersion,
|
||||
},
|
||||
func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string {
|
||||
var body string
|
||||
if forkName != "" {
|
||||
authMsg := message.AuthMsg{
|
||||
Identity: &message.Identity{
|
||||
Challenge: challengeString,
|
||||
ProverName: r.proverName,
|
||||
ProverVersion: r.proverVersion,
|
||||
HardForkName: forkName,
|
||||
},
|
||||
}
|
||||
assert.NoError(t, authMsg.SignWithKey(r.privKey))
|
||||
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
|
||||
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
|
||||
} else {
|
||||
authMsg := message.LegacyAuthMsg{
|
||||
Identity: &message.LegacyIdentity{
|
||||
Challenge: challengeString,
|
||||
ProverName: r.proverName,
|
||||
ProverVersion: r.proverVersion,
|
||||
},
|
||||
}
|
||||
assert.NoError(t, authMsg.SignWithKey(r.privKey))
|
||||
body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
|
||||
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
|
||||
}
|
||||
assert.NoError(t, authMsg.SignWithKey(r.privKey))
|
||||
|
||||
body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
|
||||
authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
|
||||
|
||||
var result ctypes.Response
|
||||
client := resty.New()
|
||||
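
Side note on the login body above: it is assembled by hand with fmt.Sprintf, once per protocol version. A hedged alternative sketch in Go, assuming only encoding/json from the standard library: one struct covers both wire shapes and avoids manual quote escaping. The loginBody type and marshalLoginBody helper below are illustrative, not part of the repository.

// Illustrative only: one struct covers both payload shapes shown above.
// With omitempty, the legacy (pre-fork-aware) body simply omits hard_fork_name.
type loginBody struct {
	Message struct {
		Challenge     string `json:"challenge"`
		ProverName    string `json:"prover_name"`
		ProverVersion string `json:"prover_version"`
		HardForkName  string `json:"hard_fork_name,omitempty"`
	} `json:"message"`
	Signature string `json:"signature"`
}

func marshalLoginBody(challenge, proverName, proverVersion, forkName, signature string) (string, error) {
	var b loginBody
	b.Message.Challenge = challenge
	b.Message.ProverName = proverName
	b.Message.ProverVersion = proverVersion
	b.Message.HardForkName = forkName // empty string => field omitted
	b.Signature = signature
	raw, err := json.Marshal(&b) // json.Marshal handles escaping for us
	return string(raw), err
}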
@@ -137,7 +151,7 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool {

func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) {
// get task from coordinator
token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)

type response struct {
@@ -151,7 +165,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "hard_fork_name": forkName}).
SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}).
SetResult(&result).
Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task")
assert.NoError(t, err)
@@ -160,9 +174,11 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo
}

// Testing expected errors returned by coordinator.
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) {
//
//nolint:unparam
func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) {
// get task from coordinator
token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)

type response struct {
@@ -185,7 +201,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType)
return result.ErrCode, result.ErrMsg
}

func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) {
proofMsgStatus := message.StatusOk
if proofStatus == generatedFailed {
proofMsgStatus = message.StatusProofError
@@ -228,7 +244,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc
submitProof.Proof = string(encodeData)
}

token := r.connectToCoordinator(t)
token := r.connectToCoordinator(t, forkName)
assert.NotEmpty(t, token)

submitProofData, err := json.Marshal(submitProof)

@@ -6,7 +6,7 @@ require (
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.9
github.com/pressly/goose/v3 v3.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
)
@@ -20,6 +20,7 @@ require (
github.com/docker/go-connections v0.5.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/jackc/pgx/v5 v5.5.4 // indirect
github.com/klauspost/compress v1.17.4 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
@@ -31,7 +32,6 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.17.0 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/sync v0.6.0 // indirect
golang.org/x/sys v0.17.0 // indirect

@@ -58,8 +58,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.5.0 h1:NxstgwndsTRy7eq9/kqYc/BZh5w2hHJV86wjvO+1xPw=
github.com/jackc/pgx/v5 v5.5.0/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
@@ -119,8 +119,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935 h1:bHBt6sillaT4o/9RjxkVX8pWwvEmu37uWBw4XbCjfzY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=

@@ -1,93 +1,89 @@
package migrate

import (
"database/sql"
"testing"

"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
"github.com/stretchr/testify/assert"

"scroll-tech/common/docker"

"scroll-tech/database"
"scroll-tech/common/testcontainers"
)

var (
base *docker.App
pgDB *sqlx.DB
testApps *testcontainers.TestcontainerApps
pgDB *sql.DB
)

func initEnv(t *testing.T) error {
func setupEnv(t *testing.T) {
// Start db container.
base.RunDBImage(t)
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
gormClient, err := testApps.GetGormDBClient()
assert.NoError(t, err)
pgDB, err = gormClient.DB()
assert.NoError(t, err)
}

// Create db orm handler.
factory, err := database.NewOrmFactory(base.DBConfig)
if err != nil {
return err
}
pgDB = factory.GetDB()
return nil
func TestMain(m *testing.M) {
defer func() {
if testApps != nil {
testApps.Free()
}
}()
m.Run()
}

func TestMigrate(t *testing.T) {
base = docker.NewDockerApp()
if err := initEnv(t); err != nil {
t.Fatal(err)
}

setupEnv(t)
t.Run("testCurrent", testCurrent)
t.Run("testStatus", testStatus)
t.Run("testResetDB", testResetDB)
t.Run("testMigrate", testMigrate)
t.Run("testRollback", testRollback)

t.Cleanup(func() {
base.Free()
})
}

func testCurrent(t *testing.T) {
cur, err := Current(pgDB.DB)
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(0), cur)
}

func testStatus(t *testing.T) {
status := Status(pgDB.DB)
status := Status(pgDB)
assert.NoError(t, status)
}

func testResetDB(t *testing.T) {
assert.NoError(t, ResetDB(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, ResetDB(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, int64(16), cur)
assert.Equal(t, int64(17), cur)
}

func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB.DB))
cur, err := Current(pgDB.DB)
assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(16), cur)
assert.Equal(t, int64(17), cur)
}

func testRollback(t *testing.T) {
version, err := Current(pgDB.DB)
version, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(16), version)
assert.Equal(t, int64(17), version)

assert.NoError(t, Rollback(pgDB.DB, nil))
assert.NoError(t, Rollback(pgDB, nil))

cur, err := Current(pgDB.DB)
cur, err := Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, version, cur+1)

targetVersion := int64(0)
assert.NoError(t, Rollback(pgDB.DB, &targetVersion))
assert.NoError(t, Rollback(pgDB, &targetVersion))

cur, err = Current(pgDB.DB)
cur, err = Current(pgDB)
assert.NoError(t, err)
assert.Equal(t, int64(0), cur)
}

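For orientation, a hedged usage sketch of the helpers exercised above, assuming the same Current/Migrate/Rollback signatures over a plain *sql.DB and a locally reachable Postgres. The DSN and the import path of the migrate package are placeholders inferred from the module layout.

// Illustrative driver for the migrate package; DSN and import path assumed.
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"

	"scroll-tech/database/migrate" // import path assumed from the module layout
)

func main() {
	db, err := sql.Open("postgres", "postgres://postgres:postgres@localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := migrate.Migrate(db); err != nil { // apply all pending migrations
		log.Fatal(err)
	}
	cur, err := migrate.Current(db) // 17 after this change set
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("schema version:", cur)

	target := int64(16)
	if err := migrate.Rollback(db, &target); err != nil { // undo migration 00017
		log.Fatal(err)
	}
}
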
27
database/migrate/migrations/00017_add_blob_meta_data.sql
Normal file
@@ -0,0 +1,27 @@
-- +goose Up
-- +goose StatementBegin

ALTER TABLE chunk
ADD COLUMN crc_max INTEGER DEFAULT 0,
ADD COLUMN blob_size INTEGER DEFAULT 0;

ALTER TABLE batch
ADD COLUMN data_hash VARCHAR DEFAULT '',
ADD COLUMN blob_data_proof BYTEA DEFAULT NULL,
ADD COLUMN blob_size INTEGER DEFAULT 0;

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin

ALTER TABLE IF EXISTS batch
DROP COLUMN data_hash,
DROP COLUMN blob_data_proof,
DROP COLUMN blob_size;

ALTER TABLE IF EXISTS chunk
DROP COLUMN crc_max,
DROP COLUMN blob_size;

-- +goose StatementEnd
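
Every added column carries a default, so rows written before this migration stay readable; this is also why the table-count assertions above move from 16 to 17. A hedged sketch of reading the new batch columns back with database/sql: the column names come from the migration itself, while the surrounding query and variables are illustrative.

// Illustrative read of the columns added by 00017_add_blob_meta_data.sql.
// blob_data_proof defaults to NULL, so scan into []byte (nil on NULL).
row := db.QueryRow(
	`SELECT data_hash, blob_size, blob_data_proof FROM batch WHERE hash = $1`,
	batchHash,
)
var (
	dataHash      string
	blobSize      int64
	blobDataProof []byte // stays nil when the column is NULL
)
if err := row.Scan(&dataHash, &blobSize, &blobDataProof); err != nil {
	return err
}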
10
go.work.sum
@@ -660,6 +660,7 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7 h1:6IrxszG5G+O7zhtkWxq6+unVvnrm1fqV2Pe+T95DUzw=
github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI=
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0=
@@ -779,8 +780,6 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNPk=
github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs=
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0=
@@ -1560,6 +1559,9 @@ github.com/tdewolff/parse/v2 v2.6.4 h1:KCkDvNUMof10e3QExio9OPZJT8SbdKojLBumw8YZy
github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs=
github.com/tdewolff/test v1.0.7 h1:8Vs0142DmPFW/bQeHRP3MV19m1gvndjUb1sn8yy74LM=
github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE=
github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU=
github.com/testcontainers/testcontainers-go/modules/compose v0.28.0/go.mod h1:lShXm8oldlLck3ltA5u+ShSvUnZ+wiNxwpp8wAQGZ1Y=
github.com/testcontainers/testcontainers-go/modules/postgres v0.28.0/go.mod h1:fXgcYpbyrduNdiz2qRZuYkmvqLnEqsjbQiBNYH1ystI=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -1737,8 +1739,6 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4=
golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
@@ -1768,6 +1768,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1952,6 +1953,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=

@@ -21,14 +21,15 @@ import (
type CoordinatorClient struct {
client *resty.Client

proverName string
priv *ecdsa.PrivateKey
proverName string
hardForkName string
priv *ecdsa.PrivateKey

mu sync.Mutex
}

// NewCoordinatorClient constructs a new CoordinatorClient.
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, hardForkName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) {
client := resty.New().
SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second).
SetRetryCount(cfg.RetryCount).
@@ -50,9 +51,10 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
"retry wait time (second)", cfg.RetryWaitTimeSec)

return &CoordinatorClient{
client: client,
proverName: proverName,
priv: priv,
client: client,
proverName: proverName,
hardForkName: hardForkName,
priv: priv,
}, nil
}

@@ -83,6 +85,7 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
ProverVersion: version.Version,
ProverName: c.proverName,
Challenge: challengeResult.Data.Token,
HardForkName: c.hardForkName,
},
}

@@ -97,10 +100,12 @@ func (c *CoordinatorClient) Login(ctx context.Context) error {
Challenge string `json:"challenge"`
ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
}{
Challenge: authMsg.Identity.Challenge,
ProverName: authMsg.Identity.ProverName,
ProverVersion: authMsg.Identity.ProverVersion,
HardForkName: authMsg.Identity.HardForkName,
},
Signature: authMsg.Signature,
}

@@ -25,6 +25,7 @@ type LoginRequest struct {
Challenge string `json:"challenge"`
ProverName string `json:"prover_name"`
ProverVersion string `json:"prover_version"`
HardForkName string `json:"hard_fork_name"`
} `json:"message"`
Signature string `json:"signature"`
}
@@ -41,7 +42,6 @@ type LoginResponse struct {

// GetTaskRequest defines the request structure for GetTask API
type GetTaskRequest struct {
HardForkName string `json:"hard_fork_name"`
TaskType message.ProofType `json:"task_type"`
ProverHeight uint64 `json:"prover_height,omitempty"`
VK string `json:"vk"`

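Net effect of the two struct changes above: hard_fork_name moves out of the per-call GetTask request and into the signed login identity, so the fork is fixed once per session token, while the VK is still sent on every get_task (see the comment in the prover's fetch path further down). A hedged sketch of the new request body, with illustrative values:

// Illustrative get_task body after this change; values are placeholders.
req := &GetTaskRequest{
	TaskType:     message.ProofTypeChunk,
	ProverHeight: 100,
	VK:           "<base64-encoded verifying key>",
}
raw, _ := json.Marshal(req)
// raw now looks like {"task_type":<int>,"prover_height":100,"vk":"..."},
// with no hard_fork_name field anymore.
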
@@ -12,7 +12,7 @@ import (
"scroll-tech/prover/config"

"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)
@@ -30,7 +30,7 @@ func getIndex() int {
type ProverApp struct {
Config *config.Config

base *docker.App
testApps *testcontainers.TestcontainerApps

originFile string
proverFile string
@@ -39,11 +39,11 @@ type ProverApp struct {
index int
name string
args []string
docker.AppAPI
*cmd.Cmd
}

// NewProverApp return a new proverApp manager.
func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
func NewProverApp(testApps *testcontainers.TestcontainerApps, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
var proofType message.ProofType
switch mockName {
case utils.ChunkProverApp:
@@ -54,17 +54,17 @@ func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, htt
return nil
}
name := string(mockName)
proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", base.Timestamp, name)
proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", testApps.Timestamp, name)
proverApp := &ProverApp{
base: base,
testApps: testApps,
originFile: file,
proverFile: proverFile,
bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", base.Timestamp, name),
bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", testApps.Timestamp, name),
index: getIndex(),
name: name,
args: []string{"--log.debug", "--config", proverFile},
}
proverApp.AppAPI = cmd.NewCmd(proverApp.name, proverApp.args...)
proverApp.Cmd = cmd.NewCmd(proverApp.name, proverApp.args...)
if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
panic(err)
}
@@ -73,13 +73,13 @@ func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, htt

// RunApp run prover-test child process by multi parameters.
func (r *ProverApp) RunApp(t *testing.T) {
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, "prover start successfully") })
r.Cmd.RunApp(func() bool { return r.Cmd.WaitResult(t, time.Second*40, "prover start successfully") })
}

// Free stop and release prover-test.
func (r *ProverApp) Free() {
if !utils.IsNil(r.AppAPI) {
r.AppAPI.WaitExit()
if !utils.IsNil(r.Cmd) {
r.Cmd.WaitExit()
}
_ = os.Remove(r.proverFile)
_ = os.Remove(r.Config.KeystorePath)
@@ -93,8 +93,13 @@ func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.Pro
return err
}
cfg.ProverName = fmt.Sprintf("%s_%d", r.name, r.index)
cfg.KeystorePath = fmt.Sprintf("/tmp/%d_%s.json", r.base.Timestamp, cfg.ProverName)
cfg.L2Geth.Endpoint = r.base.L2gethImg.Endpoint()
cfg.KeystorePath = fmt.Sprintf("/tmp/%d_%s.json", r.testApps.Timestamp, cfg.ProverName)

endpoint, err := r.testApps.GetL2GethEndPoint()
if err != nil {
return err
}
cfg.L2Geth.Endpoint = endpoint
cfg.L2Geth.Confirmations = rpc.LatestBlockNumber
// Reuse l1geth's keystore file
cfg.KeystorePassword = "scrolltest"

@@ -7,8 +7,11 @@ import (
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"testing"

"github.com/scroll-tech/go-ethereum/core/types"
@@ -24,8 +27,7 @@ var (
paramsPath = flag.String("params", "/assets/test_params", "params dir")
assetsPath = flag.String("assets", "/assets/test_assets", "assets dir")
proofDumpPath = flag.String("dump", "/assets/proof_data", "the path proofs dump to")
tracePath1 = flag.String("trace1", "/assets/traces/1_transfer.json", "chunk trace 1")
tracePath2 = flag.String("trace2", "/assets/traces/10_transfer.json", "chunk trace 2")
batchDirPath = flag.String("batch-dir", "/assets/traces/batch_24", "batch directory")
batchVkPath = flag.String("batch-vk", "/assets/test_assets/agg_vk.vkey", "batch vk")
chunkVkPath = flag.String("chunk-vk", "/assets/test_assets/chunk_vk.vkey", "chunk vk")
)
@@ -46,23 +48,34 @@ func TestFFI(t *testing.T) {
as.Equal(chunkProverCore.VK, readVk(*chunkVkPath, as))
t.Log("Chunk VK must be available when init")

chunkTrace1 := readChunkTrace(*tracePath1, as)
chunkTrace2 := readChunkTrace(*tracePath2, as)
t.Log("Loaded chunk traces")
// Get the list of subdirectories (chunks)
chunkDirs, err := os.ReadDir(*batchDirPath)
as.NoError(err)
sort.Slice(chunkDirs, func(i, j int) bool {
return chunkDirs[i].Name() < chunkDirs[j].Name()
})

chunkInfo1, err := chunkProverCore.TracesToChunkInfo(chunkTrace1)
as.NoError(err)
chunkInfo2, err := chunkProverCore.TracesToChunkInfo(chunkTrace2)
as.NoError(err)
t.Log("Converted to chunk infos")
chunkInfos := make([]*message.ChunkInfo, 0, len(chunkDirs))
chunkProofs := make([]*message.ChunkProof, 0, len(chunkDirs))

chunkProof1, err := chunkProverCore.ProveChunk("chunk_proof1", chunkTrace1)
as.NoError(err)
t.Log("Generated and dumped chunk proof 1")
for i, dir := range chunkDirs {
if dir.IsDir() {
chunkPath := filepath.Join(*batchDirPath, dir.Name())

chunkProof2, err := chunkProverCore.ProveChunk("chunk_proof2", chunkTrace2)
as.NoError(err)
t.Log("Generated and dumped chunk proof 2")
chunkTrace := readChunkTrace(chunkPath, as)
t.Logf("Loaded chunk trace %d", i+1)

chunkInfo, err := chunkProverCore.TracesToChunkInfo(chunkTrace)
as.NoError(err)
chunkInfos = append(chunkInfos, chunkInfo)
t.Logf("Converted to chunk info %d", i+1)

chunkProof, err := chunkProverCore.ProveChunk(fmt.Sprintf("chunk_proof%d", i+1), chunkTrace)
as.NoError(err)
chunkProofs = append(chunkProofs, chunkProof)
t.Logf("Generated and dumped chunk proof %d", i+1)
}
}

as.Equal(chunkProverCore.VK, readVk(*chunkVkPath, as))
t.Log("Chunk VKs must be equal after proving")
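
One detail worth flagging in the loop above: chunk directories are ordered with a plain lexicographic sort, which places chunk_10 before chunk_2 when numeric suffixes are not zero-padded. If the fixture names under batch_24 are zero-padded this is harmless; otherwise a numeric-aware comparison is safer. A hedged sketch, where trailingNumber is a hypothetical helper, not in the repository:

// Hypothetical numeric-aware ordering for names like "chunk_2", "chunk_10".
func trailingNumber(name string) int {
	i := strings.LastIndexByte(name, '_')
	n, err := strconv.Atoi(name[i+1:]) // i is -1 without '_', so Atoi simply fails
	if err != nil {
		return -1 // names without a numeric suffix sort first
	}
	return n
}

// drop-in replacement for the sort.Slice call above:
sort.Slice(chunkDirs, func(i, j int) bool {
	return trailingNumber(chunkDirs[i].Name()) < trailingNumber(chunkDirs[j].Name())
})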
@@ -79,8 +92,6 @@ func TestFFI(t *testing.T) {
as.Equal(batchProverCore.VK, readVk(*batchVkPath, as))
t.Log("Batch VK must be available when init")

chunkInfos := []*message.ChunkInfo{chunkInfo1, chunkInfo2}
chunkProofs := []*message.ChunkProof{chunkProof1, chunkProof2}
_, err = batchProverCore.ProveBatch("batch_proof", chunkInfos, chunkProofs)
as.NoError(err)
t.Log("Generated and dumped batch proof")
@@ -88,20 +99,46 @@ func TestFFI(t *testing.T) {
as.Equal(batchProverCore.VK, readVk(*batchVkPath, as))
t.Log("Batch VKs must be equal after proving")
}

func readChunkTrace(filePat string, as *assert.Assertions) []*types.BlockTrace {
f, err := os.Open(filePat)
as.NoError(err)
defer func() {
as.NoError(f.Close())
}()
byt, err := io.ReadAll(f)
fileInfo, err := os.Stat(filePat)
as.NoError(err)

trace := &types.BlockTrace{}
as.NoError(json.Unmarshal(byt, trace))
var traces []*types.BlockTrace

return []*types.BlockTrace{trace}
readFile := func(path string) {
f, err := os.Open(path)
as.NoError(err)
defer func() {
as.NoError(f.Close())
}()
byt, err := io.ReadAll(f)
as.NoError(err)

trace := &types.BlockTrace{}
as.NoError(json.Unmarshal(byt, trace))

traces = append(traces, trace)
}

if fileInfo.IsDir() {
files, err := os.ReadDir(filePat)
as.NoError(err)

// Sort files alphabetically
sort.Slice(files, func(i, j int) bool {
return files[i].Name() < files[j].Name()
})

for _, file := range files {
if !file.IsDir() {
readFile(filepath.Join(filePat, file.Name()))
}
}
} else {
readFile(filePat)
}

return traces
}

func readVk(filePat string, as *assert.Assertions) string {

@@ -5,7 +5,7 @@ go 1.21
require (
github.com/go-resty/resty/v2 v2.7.0
github.com/google/uuid v1.6.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7
go.etcd.io/bbolt v1.3.7

@@ -168,8 +168,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935 h1:bHBt6sillaT4o/9RjxkVX8pWwvEmu37uWBw4XbCjfzY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

@@ -82,7 +82,7 @@ func NewProver(ctx context.Context, cfg *config.Config) (*Prover, error) {
}
log.Info("init prover_core successfully!")

coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, priv)
coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, cfg.HardForkName, priv)
if err != nil {
return nil, err
}
@@ -178,8 +178,7 @@ func (r *Prover) proveAndSubmit() error {
func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) {
// prepare the request
req := &client.GetTaskRequest{
HardForkName: r.cfg.HardForkName,
TaskType: r.Type(),
TaskType: r.Type(),
// we may not be able to get the vk at the first time, so we should pass vk to the coordinator every time we getTask
// instead of passing vk when we login
VK: r.proverCore.VK,

@@ -24,7 +24,7 @@ rollup_relayer: ## Builds the rollup_relayer bin
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...

lint: ## Lint the files - used for CI
lint: mock_abi ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go

clean: ## Empty out the bin folder

File diff suppressed because one or more lines are too long
@@ -76,7 +76,7 @@ func action(ctx *cli.Context) error {
}
})

log.Info("Start event-watcher successfully")
log.Info("Start event-watcher successfully", "version", version.Version)

// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

@@ -10,6 +10,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"

@@ -83,7 +84,7 @@ func action(ctx *cli.Context) error {
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
if err != nil {
log.Crit("failed to create new l2 relayer", "config file", cfgFile, "error", err)
}
@@ -108,7 +109,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)

// Finish start all message relayer functions
log.Info("Start gas-oracle successfully")
log.Info("Start gas-oracle successfully", "version", version.Version)

// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

@@ -7,19 +7,19 @@ import (
"testing"
"time"

"scroll-tech/common/cmd"
"scroll-tech/common/docker"
"scroll-tech/common/utils"

"scroll-tech/rollup/internal/config"

"scroll-tech/common/cmd"
"scroll-tech/common/testcontainers"
"scroll-tech/common/utils"
)

// MockApp mockApp-test client manager.
type MockApp struct {
Config *config.Config
base *docker.App
Config *config.Config
testApps *testcontainers.TestcontainerApps

mockApps map[utils.MockAppName]docker.AppAPI
mockApps map[utils.MockAppName]*cmd.Cmd

originFile string
rollupFile string
@@ -27,13 +27,12 @@ type MockApp struct {
args []string
}

// NewRollupApp return a new rollupApp manager, name mush be one them.
func NewRollupApp(base *docker.App, file string) *MockApp {

rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", base.Timestamp)
// NewRollupApp return a new rollupApp manager.
func NewRollupApp(testApps *testcontainers.TestcontainerApps, file string) *MockApp {
rollupFile := fmt.Sprintf("/tmp/%d_rollup-config.json", testApps.Timestamp)
rollupApp := &MockApp{
base: base,
mockApps: make(map[utils.MockAppName]docker.AppAPI),
testApps: testApps,
mockApps: make(map[utils.MockAppName]*cmd.Cmd),
originFile: file,
rollupFile: rollupFile,
args: []string{"--log.debug", "--config", rollupFile},
@@ -69,7 +68,7 @@ func (b *MockApp) WaitExit() {
for _, app := range b.mockApps {
app.WaitExit()
}
b.mockApps = make(map[utils.MockAppName]docker.AppAPI)
b.mockApps = make(map[utils.MockAppName]*cmd.Cmd)
}

// Free stop and release rollup mocked apps.
@@ -80,18 +79,29 @@ func (b *MockApp) Free() {

// MockConfig creates a new rollup config.
func (b *MockApp) MockConfig(store bool) error {
base := b.base
// Load origin rollup config file.
cfg, err := config.NewConfig(b.originFile)
if err != nil {
return err
}

cfg.L1Config.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.L2Config.Endpoint = base.L2gethImg.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.DBConfig.DSN = base.DBImg.Endpoint()
l1GethEndpoint, err := b.testApps.GetL1GethEndPoint()
if err != nil {
return err
}
l2GethEndpoint, err := b.testApps.GetL2GethEndPoint()
if err != nil {
return err
}
dbEndpoint, err := b.testApps.GetDBEndPoint()
if err != nil {
return err
}
cfg.L1Config.Endpoint = l1GethEndpoint
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2GethEndpoint
cfg.L2Config.Endpoint = l2GethEndpoint
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1GethEndpoint
cfg.DBConfig.DSN = dbEndpoint
b.Config = cfg

if !store {

@@ -72,18 +72,18 @@ func action(ctx *cli.Context) error {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}

initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
if err != nil {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}

genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := utils.ReadGenesis(genesisPath)
if err != nil {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}

initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
if err != nil {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}

chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, genesis.Config, db, registry)
if err != nil {
log.Crit("failed to create chunkProposer", "config file", cfgFile, "error", err)
@@ -115,7 +115,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)

// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")
log.Info("Start rollup-relayer successfully", "version", version.Version)

// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)

@@ -10,11 +10,10 @@
"sender_config": {
"endpoint": "https://rpc.scroll.io",
"escalate_blocks": 1,
"confirmations": "0x1",
"confirmations": "0x0",
"escalate_multiple_num": 2,
"escalate_multiple_den": 1,
"max_gas_price": 1000000000000,
"max_blob_gas_price": 10000000000000,
"tx_type": "LegacyTx",
"check_pending_time": 1
},
@@ -35,7 +34,7 @@
"sender_config": {
"endpoint": "https://rpc.ankr.com/eth",
"escalate_blocks": 1,
"confirmations": "0x6",
"confirmations": "0x0",
"escalate_multiple_num": 2,
"escalate_multiple_den": 1,
"max_gas_price": 1000000000000,

@@ -10,7 +10,7 @@ require (
github.com/go-resty/resty/v2 v2.7.0
github.com/holiman/uint256 v1.2.4
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.25.7

@@ -237,8 +237,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935 h1:bHBt6sillaT4o/9RjxkVX8pWwvEmu37uWBw4XbCjfzY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240314095130-4553f5f26935/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e h1:FcoK0rykAWI+5E7cQM6ALRLd5CmjBTHRvJztRBH2xeM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240326144132-0f0cd99f7a2e/go.mod h1:7Rz2bh9pn42rGuxjh51CG7HL9SKMG3ZugJkL3emdZx8=
github.com/scroll-tech/zktrie v0.7.1 h1:NrmZNjuBzsbrKePqdHDG+t2cXnimbtezPAFS0+L9ElE=
github.com/scroll-tech/zktrie v0.7.1/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=

@@ -13,13 +13,17 @@ import (
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"
"scroll-tech/common/types/encoding/codecv1"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"

bridgeAbi "scroll-tech/rollup/abi"
@@ -61,10 +65,12 @@ type Layer2Relayer struct {
chainMonitorClient *resty.Client

metrics *l2RelayerMetrics

chainCfg *params.ChainConfig
}

// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
var gasOracleSender, commitSender, finalizeSender *sender.Sender
var err error

@@ -133,7 +139,8 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,

cfg: cfg,
cfg: cfg,
chainCfg: chainCfg,
}

// chain_monitor client
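
The constructor change above threads a *params.ChainConfig into the relayer so fork checks (the IsBernoulli calls further down) happen where batches are encoded. The callers in this diff show the intended wiring: rollup-relayer passes genesis.Config, while gas-oracle, which never encodes batches, passes an empty &params.ChainConfig{}. A minimal caller sketch with placeholder values; cfg, db, l2client and registry come from the surrounding app:

// Illustrative wiring only; the genesis path is a placeholder.
genesis, err := utils.ReadGenesis("/path/to/genesis.json")
if err != nil {
	log.Crit("failed to read genesis", "err", err)
}
l2relayer, err := relayer.NewLayer2Relayer(
	ctx, l2client, db, cfg.L2Config.RelayerConfig,
	genesis.Config, // *params.ChainConfig drives the codecv0/codecv1 fork checks
	false,          // initGenesis
	relayer.ServiceTypeL2RollupRelayer,
	registry,
)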
@@ -189,7 +196,7 @@ func (r *Layer2Relayer) initializeGenesis() error {

err = r.db.Transaction(func(dbTX *gorm.DB) error {
var dbChunk *orm.Chunk
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, dbTX)
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, encoding.CodecV0, dbTX)
if err != nil {
return fmt.Errorf("failed to insert chunk: %v", err)
}
@@ -206,7 +213,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
}

var dbBatch *orm.Batch
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, dbTX)
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, dbTX)
if err != nil {
return fmt.Errorf("failed to insert batch: %v", err)
}
@@ -239,9 +246,9 @@ func (r *Layer2Relayer) initializeGenesis() error {

func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
// encode "importGenesisBatch" transaction calldata
calldata, err := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if err != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, err)
calldata, packErr := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
if packErr != nil {
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
}

// submit genesis batch to L1 rollup contract
@@ -284,8 +291,8 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
func (r *Layer2Relayer) ProcessGasPriceOracle() {
r.metrics.rollupL2RelayerGasPriceOraclerRunTotal.Inc()
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
if batch == nil || err != nil {
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
if err != nil {
log.Error("Failed to GetLatestBatch", "err", err)
return
}

@@ -330,95 +337,78 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
func (r *Layer2Relayer) ProcessPendingBatches() {
// get pending batches from database in ascending order by their index.
batches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, 5)
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, 5)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}
for _, batch := range batches {
for _, dbBatch := range dbBatches {
r.metrics.rollupL2RelayerProcessPendingBatchTotal.Inc()
// get current header and parent header.
daBatch, err := codecv0.NewDABatchFromBytes(batch.BatchHeader)
if err != nil {
log.Error("Failed to initialize new DA batch from bytes", "index", batch.Index, "hash", batch.Hash, "err", err)
return
}
parentBatch := &orm.Batch{}
if batch.Index > 0 {
parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
if err != nil {
log.Error("Failed to get parent batch header", "index", batch.Index-1, "error", err)
return
}

if types.RollupStatus(parentBatch.RollupStatus) == types.RollupCommitFailed {
log.Error("Previous batch commit failed, halting further committing",
"index", parentBatch.Index, "tx hash", parentBatch.CommitTxHash)
return
}
}

// get the metadata of chunks for the batch
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, batch.StartChunkIndex, batch.EndChunkIndex)
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
if err != nil {
log.Error("Failed to fetch chunks",
"start index", batch.StartChunkIndex,
"end index", batch.EndChunkIndex, "error", err)
log.Error("failed to get chunks in range", "err", err)
return
}

encodedChunks := make([][]byte, len(dbChunks))
chunks := make([]*encoding.Chunk, len(dbChunks))
for i, c := range dbChunks {
var blocks []*encoding.Block
blocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if err != nil {
log.Error("Failed to fetch blocks", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
blocks, getErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if getErr != nil {
log.Error("failed to get blocks in range", "err", getErr)
return
}
chunk := &encoding.Chunk{
Blocks: blocks,
}
var daChunk *codecv0.DAChunk
daChunk, err = codecv0.NewDAChunk(chunk, c.TotalL1MessagesPoppedBefore)
if err != nil {
log.Error("Failed to initialize new DA chunk", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return
}
var daChunkBytes []byte
daChunkBytes, err = daChunk.Encode()
if err != nil {
log.Error("Failed to encode DA chunk", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
return
}
encodedChunks[i] = daChunkBytes
chunks[i] = &encoding.Chunk{Blocks: blocks}
}

calldata, err := r.l1RollupABI.Pack("commitBatch", daBatch.Version, parentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
if err != nil {
log.Error("Failed to pack commitBatch", "index", batch.Index, "error", err)
if dbBatch.Index == 0 {
log.Error("invalid args: batch index is 0, should only happen in committing genesis batch")
return
}

// send transaction
fallbackGasLimit := uint64(float64(batch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
if types.RollupStatus(batch.RollupStatus) == types.RollupCommitFailed {
// use eth_estimateGas if this batch has been committed failed.
fallbackGasLimit = 0
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", batch.Hash)
dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
if getErr != nil {
log.Error("failed to get parent batch header", "err", getErr)
return
}
txHash, err := r.commitSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, calldata, nil, fallbackGasLimit)

var calldata []byte
var blob *kzg4844.Blob
if !r.chainCfg.IsBernoulli(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv0
calldata, err = r.constructCommitBatchPayloadCodecV0(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatch payload codecv0", "index", dbBatch.Index, "err", err)
return
}
} else { // codecv1
calldata, blob, err = r.constructCommitBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks)
if err != nil {
log.Error("failed to construct commitBatch payload codecv1", "index", dbBatch.Index, "err", err)
return
}
}

// fallbackGasLimit is non-zero only in sending non-blob transactions.
fallbackGasLimit := uint64(float64(dbBatch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
if types.RollupStatus(dbBatch.RollupStatus) == types.RollupCommitFailed {
// use eth_estimateGas if this batch has been committed and failed at least once.
fallbackGasLimit = 0
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", dbBatch.Hash)
}

txHash, err := r.commitSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, blob, fallbackGasLimit)
if err != nil {
log.Error(
"Failed to send commitBatch tx to layer1",
"index", batch.Index,
"hash", batch.Hash,
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
)
log.Debug(
"Failed to send commitBatch tx to layer1",
"index", batch.Index,
"hash", batch.Hash,
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(calldata),
"err", err,
@@ -426,13 +416,13 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}

err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batch.Hash, txHash.String(), types.RollupCommitting)
err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", dbBatch.Hash, "index", dbBatch.Index, "err", err)
return
}
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Inc()
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
log.Info("Sent the commitBatch tx to layer1", "batch index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
}
}

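To restate the dispatch just introduced in ProcessPendingBatches: the relayer keys the batch encoding off the chain config, relying (as the code does) on the assumption that no batch straddles the fork boundary, so checking the first chunk's first block suffices. Condensed:

// Condensed restatement of the commit-path dispatch above; not new behavior.
firstBlock := new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)
if !r.chainCfg.IsBernoulli(firstBlock) {
	// pre-Bernoulli batches: chunk data travels as codecv0 calldata
	calldata, err = r.constructCommitBatchPayloadCodecV0(dbBatch, dbParentBatch, dbChunks, chunks)
} else {
	// post-Bernoulli batches: chunk data moves into an EIP-4844 blob (codecv1)
	calldata, blob, err = r.constructCommitBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks)
}
// blob stays nil on the codecv0 path, so SendTransaction falls back to a
// plain calldata transaction there.
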
@@ -501,105 +491,118 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
}

func (r *Layer2Relayer) finalizeBatch(batch *orm.Batch, withProof bool) error {
func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error {
// Check batch status before send `finalizeBatch` tx.
if r.cfg.ChainMonitor.Enabled {
var batchStatus bool
batchStatus, err := r.getBatchStatusByIndex(batch)
batchStatus, err := r.getBatchStatusByIndex(dbBatch)
if err != nil {
r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", dbBatch.Index, "err", err)
return err
}
if !batchStatus {
r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", dbBatch.Index)
return err
}
}

var parentBatchStateRoot string
if batch.Index > 0 {
var parentBatch *orm.Batch
parentBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
// handle unexpected db error
if err != nil {
log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
return err
}
parentBatchStateRoot = parentBatch.StateRoot
if dbBatch.Index == 0 {
return fmt.Errorf("invalid args: batch index is 0, should only happen in finalizing genesis batch")
}

var txCalldata []byte
dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
if getErr != nil {
return fmt.Errorf("failed to get batch, index: %d, err: %w", dbBatch.Index-1, getErr)
}

dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
if err != nil {
return fmt.Errorf("failed to fetch chunks: %w", err)
}

var aggProof *message.BatchProof
if withProof {
aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, batch.Hash)
if err != nil {
log.Error("get verified proof by hash failed", "hash", batch.Hash, "err", err)
return err
aggProof, getErr = r.batchOrm.GetVerifiedProofByHash(r.ctx, dbBatch.Hash)
if getErr != nil {
return fmt.Errorf("failed to get verified proof by hash, index: %d, err: %w", dbBatch.Index, getErr)
}

if err = aggProof.SanityCheck(); err != nil {
log.Error("agg_proof sanity check fails", "hash", batch.Hash, "error", err)
return err
}

txCalldata, err = r.l1RollupABI.Pack(
"finalizeBatchWithProof",
batch.BatchHeader,
common.HexToHash(parentBatchStateRoot),
common.HexToHash(batch.StateRoot),
common.HexToHash(batch.WithdrawRoot),
aggProof.Proof,
)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
return err
}
} else {
var err error
txCalldata, err = r.l1RollupABI.Pack(
"finalizeBatch",
batch.BatchHeader,
common.HexToHash(parentBatchStateRoot),
common.HexToHash(batch.StateRoot),
common.HexToHash(batch.WithdrawRoot),
)
if err != nil {
log.Error("Pack finalizeBatch failed", "err", err)
return err
return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", dbBatch.Index, err)
}
}

// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.finalizeSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, txCalldata, nil, 0)
finalizeTxHash := &txHash
var calldata []byte
if !r.chainCfg.IsBernoulli(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv0
calldata, err = r.constructFinalizeBatchPayloadCodecV0(dbBatch, dbParentBatch, aggProof)
if err != nil {
return fmt.Errorf("failed to construct commitBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err)
}
} else { // codecv1
chunks := make([]*encoding.Chunk, len(dbChunks))
for i, c := range dbChunks {
blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
if dbErr != nil {
return fmt.Errorf("failed to fetch blocks: %w", dbErr)
}
chunks[i] = &encoding.Chunk{Blocks: blocks}
}

calldata, err = r.constructFinalizeBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks, aggProof)
if err != nil {
return fmt.Errorf("failed to construct commitBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err)
}
}

txHash, err := r.finalizeSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0)
if err != nil {
log.Error(
"finalizeBatch in layer1 failed",
"with proof", withProof,
"index", batch.Index,
"hash", batch.Hash,
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
)
log.Debug(
"finalizeBatch in layer1 failed",
"with proof", withProof,
"index", batch.Index,
"hash", batch.Hash,
"index", dbBatch.Index,
"hash", dbBatch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(txCalldata),
"calldata", common.Bytes2Hex(calldata),
"err", err,
)
return err
}
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", batch.Index, "batch hash", batch.Hash, "tx hash", batch.Hash)

log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())

// record and sync with db, @todo handle db error
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch.Hash, finalizeTxHash.String(), types.RollupFinalizing); err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", batch.Index, "batch hash", batch.Hash, "tx hash", finalizeTxHash.String(), "err", err)
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupFinalizing); err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err)
return err
}

// Updating the proving status when finalizing without proof, thus the coordinator could omit this task.
// it isn't a necessary step, so don't put in a transaction with UpdateFinalizeTxHashAndRollupStatus
if !withProof {
txErr := r.db.Transaction(func(tx *gorm.DB) error {
if updateErr := r.batchOrm.UpdateProvingStatus(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
return updateErr
}
if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil {
|
||||
return updateErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if txErr != nil {
|
||||
log.Error("Updating chunk and batch proving status when finalizing without proof failure", "batchHash", dbBatch.Hash, "err", txErr)
|
||||
}
|
||||
}
|
||||
|
||||
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
|
||||
return nil
|
||||
}
|
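
For illustration, the fork-gated dispatch above (codecv0 before Bernoulli, codecv1 after) can be sketched as a minimal standalone Go program. This is a simplified stand-in, not the repository's code: chainConfig, buildPayload and the payload strings are hypothetical; only the IsBernoulli-style fork check mirrors the diff.

package main

import (
	"fmt"
	"math/big"
)

// chainConfig is a hypothetical stand-in for params.ChainConfig.
type chainConfig struct{ bernoulliBlock *big.Int }

// isBernoulli reports whether the fork is active at the given block,
// matching the usual go-ethereum fork semantics (active when num >= fork block).
func (c *chainConfig) isBernoulli(num *big.Int) bool {
	return c.bernoulliBlock != nil && c.bernoulliBlock.Cmp(num) <= 0
}

func buildPayload(cfg *chainConfig, firstBlockNum uint64) string {
	// Pre-Bernoulli batches keep the calldata-only codecv0 encoding;
	// post-Bernoulli batches move chunk data into an EIP-4844 blob (codecv1).
	if !cfg.isBernoulli(new(big.Int).SetUint64(firstBlockNum)) {
		return "codecv0 calldata"
	}
	return "codecv1 calldata + blob"
}

func main() {
	cfg := &chainConfig{bernoulliBlock: big.NewInt(100)}
	for _, n := range []uint64{1, 100, 250} {
		fmt.Printf("first block %d -> %s\n", n, buildPayload(cfg, n))
	}
}

The decisive input is the first block number of the batch's first chunk, which is why the diff fetches dbChunks before choosing a constructor.
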
@@ -729,6 +732,140 @@ func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
}
}

func (r *Layer2Relayer) constructCommitBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, error) {
daBatch, err := codecv0.NewDABatchFromBytes(dbBatch.BatchHeader)
if err != nil {
return nil, fmt.Errorf("failed to create DA batch from bytes: %w", err)
}

encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
daChunk, createErr := codecv0.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
if createErr != nil {
return nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
}
daChunkBytes, encodeErr := daChunk.Encode()
if encodeErr != nil {
return nil, fmt.Errorf("failed to encode DA chunk: %w", encodeErr)
}
encodedChunks[i] = daChunkBytes
}

calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
if packErr != nil {
return nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
}
return calldata, nil
}

func (r *Layer2Relayer) constructCommitBatchPayloadCodecV1(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
batch := &encoding.Batch{
Index: dbBatch.Index,
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
Chunks: chunks,
}

daBatch, createErr := codecv1.NewDABatch(batch)
if createErr != nil {
return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
}

encodedChunks := make([][]byte, len(dbChunks))
for i, c := range dbChunks {
daChunk, createErr := codecv1.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
if createErr != nil {
return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
}
encodedChunks[i] = daChunk.Encode()
}

calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
if packErr != nil {
return nil, nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
}
return calldata, daBatch.Blob(), nil
}

func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, aggProof *message.BatchProof) ([]byte, error) {
if aggProof != nil { // finalizeBatch with proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatchWithProof",
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
aggProof.Proof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatchWithProof: %w", packErr)
}
return calldata, nil
}

// finalizeBatch without proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatch",
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatch: %w", packErr)
}
return calldata, nil
}

func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV1(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk, aggProof *message.BatchProof) ([]byte, error) {
batch := &encoding.Batch{
Index: dbBatch.Index,
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
Chunks: chunks,
}

daBatch, createErr := codecv1.NewDABatch(batch)
if createErr != nil {
return nil, fmt.Errorf("failed to create DA batch: %w", createErr)
}

blobDataProof, getErr := daBatch.BlobDataProof()
if getErr != nil {
return nil, fmt.Errorf("failed to get blob data proof: %w", getErr)
}

if aggProof != nil { // finalizeBatch4844 with proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatchWithProof4844",
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
blobDataProof,
aggProof.Proof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatchWithProof4844: %w", packErr)
}
return calldata, nil
}

// finalizeBatch4844 without proof.
calldata, packErr := r.l1RollupABI.Pack(
"finalizeBatch4844",
dbBatch.BatchHeader,
common.HexToHash(dbParentBatch.StateRoot),
common.HexToHash(dbBatch.StateRoot),
common.HexToHash(dbBatch.WithdrawRoot),
blobDataProof,
)
if packErr != nil {
return nil, fmt.Errorf("failed to pack finalizeBatch4844: %w", packErr)
}
return calldata, nil
}

// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
// for unit test
func (r *Layer2Relayer) StopSenders() {

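All four constructors above funnel into l1RollupABI.Pack, which selects the 4-byte method selector and ABI-encodes the arguments. A minimal, self-contained sketch of that call follows; it uses a simplified ABI and the upstream go-ethereum import path (the repository itself uses the scroll-tech fork), so names here are illustrative only.

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

const rollupABIJSON = `[{"type":"function","name":"finalizeBatch","inputs":[{"name":"batchHeader","type":"bytes"},{"name":"prevStateRoot","type":"bytes32"},{"name":"postStateRoot","type":"bytes32"},{"name":"withdrawRoot","type":"bytes32"}]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(rollupABIJSON))
	if err != nil {
		panic(err)
	}
	// Pack mirrors r.l1RollupABI.Pack(...) in the hunk above: selector plus
	// ABI-encoded arguments, ready to submit as transaction calldata.
	calldata, err := parsed.Pack(
		"finalizeBatch",
		[]byte{0x01, 0x02},
		common.HexToHash("0x01"),
		common.HexToHash("0x02"),
		common.HexToHash("0x03"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("calldata: 0x%x\n", calldata)
}

The 4844 variants in the diff differ only by the extra blobDataProof argument (and aggProof.Proof when finalizing with proof), not by the packing mechanism.
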
@@ -7,11 +7,14 @@ import (
"net/http"
"strings"
"testing"
"time"

"github.com/agiledragon/gomonkey/v2"
"github.com/gin-gonic/gin"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/params"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
@@ -40,128 +43,192 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
}

func testL2RelayerProcessPendingBatches(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer relayer.StopSenders()
l2Cfg := cfg.L2Config
chainConfig := &params.ChainConfig{}
if codecVersion == encoding.CodecV0 {
chainConfig.BernoulliBlock = big.NewInt(0)
}

l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2)
assert.NoError(t, err)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)

batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
return nil
})

l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)

batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}

batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)

relayer.ProcessPendingBatches()

statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitting, statuses[0])
relayer.StopSenders()
patchGuard.Reset()
}

batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)

relayer.ProcessPendingBatches()

statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitting, statuses[0])
}

func testL2RelayerProcessCommittedBatches(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer relayer.StopSenders()
l2Cfg := cfg.L2Config
chainConfig := &params.ChainConfig{}
if codecVersion == encoding.CodecV0 {
chainConfig.BernoulliBlock = big.NewInt(0)
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)

batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)

batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}

batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)

err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)

err = batchOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)

relayer.ProcessCommittedBatches()

statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupCommitted, statuses[0])

proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100)
assert.NoError(t, err)

relayer.ProcessCommittedBatches()
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
relayer.StopSenders()
}

batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)

err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)

err = batchOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)

relayer.ProcessCommittedBatches()

statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupCommitted, statuses[0])

proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100)
assert.NoError(t, err)

relayer.ProcessCommittedBatches()
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
}

func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
for _, codecVersion := range codecVersions {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

l2Cfg := cfg.L2Config
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer relayer.StopSenders()
l2Cfg := cfg.L2Config
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
chainConfig := &params.ChainConfig{}
if codecVersion == encoding.CodecV0 {
chainConfig.BernoulliBlock = big.NewInt(0)
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)

batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
assert.NoError(t, err)
chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
assert.NoError(t, err)

batch := &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}

batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
assert.NoError(t, err)

err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)

err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
assert.NoError(t, err)

// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
relayer.ProcessCommittedBatches()
time.Sleep(time.Second)

batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
if batchErr != nil {
return false
}
chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
if chunkErr != nil {
return false
}

batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing &&
types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified

chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified

return batchStatus && chunkStatus
})
assert.True(t, ok)
relayer.StopSenders()
}

batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
assert.NoError(t, err)

err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
assert.NoError(t, err)

// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
})
assert.True(t, ok)
}

func testL2RelayerCommitConfirm(t *testing.T) {
@@ -172,7 +239,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()

@@ -182,13 +249,13 @@ func testL2RelayerCommitConfirm(t *testing.T) {
batchHashes := make([]string, len(isSuccessful))
for i := range batchHashes {
batch := &encoding.Batch{
Index: uint64(i),
Index: uint64(i + 1),
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}

dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)
batchHashes[i] = dbBatch.Hash
}
@@ -228,7 +295,7 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()

@@ -238,13 +305,13 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
batchHashes := make([]string, len(isSuccessful))
for i := range batchHashes {
batch := &encoding.Batch{
Index: uint64(i),
Index: uint64(i + 1),
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}

dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)
batchHashes[i] = dbBatch.Hash
}
@@ -288,7 +355,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
}

batchOrm := orm.NewBatch(db)
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1)
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV0)
assert.NoError(t, err)

batch2 := &encoding.Batch{
@@ -298,14 +365,14 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
Chunks: []*encoding.Chunk{chunk2},
}

dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2)
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV0)
assert.NoError(t, err)

// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2GasOracle, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()

@@ -345,7 +412,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2GasOracle, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
@@ -440,32 +507,32 @@ func testGetBatchStatusByIndex(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV0)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV0)
assert.NoError(t, err)

batch := &encoding.Batch{
Index: 0,
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}

batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)

cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()

status, err := relayer.getBatchStatusByIndex(dbBatch)
assert.NoError(t, err)
assert.Equal(t, true, status)

@@ -14,7 +14,8 @@ import (
"github.com/stretchr/testify/assert"

"scroll-tech/common/database"
"scroll-tech/common/docker"
dockercompose "scroll-tech/common/docker-compose/l1"
"scroll-tech/common/testcontainers"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"

@@ -25,7 +26,8 @@ var (
// config
cfg *config.Config

base *docker.App
testApps *testcontainers.TestcontainerApps
posL1TestEnv *dockercompose.PoSL1TestEnv

// l2geth client
l2Cli *ethclient.Client
@@ -51,15 +53,25 @@ func setupEnv(t *testing.T) {
cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err)

base.RunImages(t)
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
assert.NoError(t, err, "failed to create PoS L1 test environment")
assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")

cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
testApps = testcontainers.NewTestcontainerApps()
assert.NoError(t, testApps.StartPostgresContainer())
assert.NoError(t, testApps.StartL2GethContainer())

cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
assert.NoError(t, err)

dsn, err := testApps.GetDBEndPoint()
assert.NoError(t, err)
cfg.DBConfig = &database.Config{
DSN: base.DBConfig.DSN,
DriverName: base.DBConfig.DriverName,
MaxOpenNum: base.DBConfig.MaxOpenNum,
MaxIdleNum: base.DBConfig.MaxIdleNum,
DSN: dsn,
DriverName: "postgres",
MaxOpenNum: 200,
MaxIdleNum: 20,
}
port, err := rand.Int(rand.Reader, big.NewInt(10000))
assert.NoError(t, err)
@@ -67,7 +79,7 @@ func setupEnv(t *testing.T) {
cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort

// Create l2geth client.
l2Cli, err = base.L2Client()
l2Cli, err = testApps.GetL2GethClient()
assert.NoError(t, err)

templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
@@ -94,11 +106,15 @@ func setupEnv(t *testing.T) {
}

func TestMain(m *testing.M) {
base = docker.NewDockerApp()

defer func() {
if testApps != nil {
testApps.Free()
}
if posL1TestEnv != nil {
posL1TestEnv.Stop()
}
}()
m.Run()

base.Free()
}

func TestFunctions(t *testing.T) {

@@ -37,9 +36,6 @@ const (

// DynamicFeeTxType type for DynamicFeeTx
DynamicFeeTxType = "DynamicFeeTx"

// BlobTxType type for BlobTx
BlobTxType = "BlobTx"
)

// Confirmation struct used to indicate transaction confirmation details
@@ -163,8 +160,9 @@ func (s *Sender) getFeeData(target *common.Address, data []byte, sidecar *gethTy
case LegacyTxType:
return s.estimateLegacyGas(target, data, fallbackGasLimit)
case DynamicFeeTxType:
return s.estimateDynamicGas(target, data, baseFee, fallbackGasLimit)
case BlobTxType:
if sidecar == nil {
return s.estimateDynamicGas(target, data, baseFee, fallbackGasLimit)
}
return s.estimateBlobGas(target, data, sidecar, baseFee, blobBaseFee, fallbackGasLimit)
default:
return nil, fmt.Errorf("unsupported transaction type: %s", s.config.TxType)
@@ -181,7 +179,7 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
err error
)

if s.config.TxType == BlobTxType {
if blob != nil {
sidecar, err = makeSidecar(blob)
if err != nil {
log.Error("failed to make sidecar for blob transaction", "error", err)
@@ -235,39 +233,36 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, data
Data: data,
}
case DynamicFeeTxType:
txData = &gethTypes.DynamicFeeTx{
Nonce: nonce,
To: target,
Data: data,
Gas: feeData.gasLimit,
AccessList: feeData.accessList,
ChainID: s.chainID,
GasTipCap: feeData.gasTipCap,
GasFeeCap: feeData.gasFeeCap,
}
case BlobTxType:
if target == nil {
log.Error("blob transaction to address cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction to address cannot be nil")
}

if sidecar == nil {
log.Error("blob transaction sidecar cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction sidecar cannot be nil")
}
txData = &gethTypes.DynamicFeeTx{
Nonce: nonce,
To: target,
Data: data,
Gas: feeData.gasLimit,
AccessList: feeData.accessList,
ChainID: s.chainID,
GasTipCap: feeData.gasTipCap,
GasFeeCap: feeData.gasFeeCap,
}
} else {
if target == nil {
log.Error("blob transaction to address cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction to address cannot be nil")
}

txData = &gethTypes.BlobTx{
ChainID: uint256.MustFromBig(s.chainID),
Nonce: nonce,
GasTipCap: uint256.MustFromBig(feeData.gasTipCap),
GasFeeCap: uint256.MustFromBig(feeData.gasFeeCap),
Gas: feeData.gasLimit,
To: *target,
Data: data,
AccessList: feeData.accessList,
BlobFeeCap: uint256.MustFromBig(feeData.blobGasFeeCap),
BlobHashes: sidecar.BlobHashes(),
Sidecar: sidecar,
txData = &gethTypes.BlobTx{
ChainID: uint256.MustFromBig(s.chainID),
Nonce: nonce,
GasTipCap: uint256.MustFromBig(feeData.gasTipCap),
GasFeeCap: uint256.MustFromBig(feeData.gasFeeCap),
Gas: feeData.gasLimit,
To: *target,
Data: data,
AccessList: feeData.accessList,
BlobFeeCap: uint256.MustFromBig(feeData.blobGasFeeCap),
BlobHashes: sidecar.BlobHashes(),
Sidecar: sidecar,
}
}
}

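The two sender hunks above make the sidecar, not the configured type string, the deciding factor: under a blob-type config, a nil blob/sidecar now falls back to a plain dynamic-fee transaction instead of failing. A minimal Go sketch of that decision (illustration only; the types and function are simplified stand-ins, not the repository's API):

package main

import (
	"errors"
	"fmt"
)

type sidecar struct{} // stand-in for gethTypes.BlobTxSidecar

// txKind mirrors the dispatch in createAndSendTx: blob transactions need both
// a target address and a sidecar; with no sidecar, fall back to a dynamic-fee tx.
func txKind(blobConfigured bool, target *string, sc *sidecar) (string, error) {
	if !blobConfigured || sc == nil {
		return "DynamicFeeTx", nil
	}
	if target == nil {
		return "", errors.New("blob transaction to address cannot be nil")
	}
	return "BlobTx", nil
}

func main() {
	addr := "0x0000000000000000000000000000000000000000"
	cases := []struct {
		blob   bool
		target *string
		sc     *sidecar
	}{
		{false, &addr, nil},       // legacy/dynamic config
		{true, &addr, nil},        // nil sidecar -> dynamic-fee fallback
		{true, nil, &sidecar{}},   // nil target -> error
		{true, &addr, &sidecar{}}, // proper blob transaction
	}
	for _, c := range cases {
		kind, err := txKind(c.blob, c.target, c.sc)
		fmt.Println(kind, err)
	}
}

This matches the removal of the dedicated BlobTxType constant earlier in the file: blob behavior is selected per call, by whether a blob is supplied.
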
@@ -357,94 +352,95 @@ func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBas
txInfo["adjusted_gas_price"] = gasPrice.Uint64()

case DynamicFeeTxType:
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
if tx.BlobTxSidecar() == nil {
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()

gasTipCap := new(big.Int).Mul(originalGasTipCap, escalateMultipleNum)
gasTipCap = new(big.Int).Div(gasTipCap, escalateMultipleDen)
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, escalateMultipleNum)
gasFeeCap = new(big.Int).Div(gasFeeCap, escalateMultipleDen)
gasTipCap := new(big.Int).Mul(originalGasTipCap, escalateMultipleNum)
gasTipCap = new(big.Int).Div(gasTipCap, escalateMultipleDen)
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, escalateMultipleNum)
gasFeeCap = new(big.Int).Div(gasFeeCap, escalateMultipleDen)

// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}

// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}

// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}

if originalGasTipCap.Cmp(gasTipCap) == 0 {
log.Warn("gas tip cap bump corner case, add 1 wei", "original", originalGasTipCap.Uint64(), "adjusted", gasTipCap.Uint64())
gasTipCap = new(big.Int).Add(gasTipCap, big.NewInt(1))
}

if originalGasFeeCap.Cmp(gasFeeCap) == 0 {
log.Warn("gas fee cap bump corner case, add 1 wei", "original", originalGasFeeCap.Uint64(), "adjusted", gasFeeCap.Uint64())
gasFeeCap = new(big.Int).Add(gasFeeCap, big.NewInt(1))
}

feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
} else {
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
originalBlobGasFeeCap := tx.BlobGasFeeCap()

// bumping at least 100%
gasTipCap := new(big.Int).Mul(originalGasTipCap, big.NewInt(2))
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, big.NewInt(2))
blobGasFeeCap := new(big.Int).Mul(originalBlobGasFeeCap, big.NewInt(2))

// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}

// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}

// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}

// adjust for rising blobbasefee
currentBlobGasFeeCap := getBlobGasFeeCap(new(big.Int).SetUint64(blobBaseFee))
if blobGasFeeCap.Cmp(currentBlobGasFeeCap) < 0 {
blobGasFeeCap = currentBlobGasFeeCap
}

// but don't exceed maxBlobGasPrice
if blobGasFeeCap.Cmp(maxBlobGasPrice) > 0 {
blobGasFeeCap = maxBlobGasPrice
}

feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
feeData.blobGasFeeCap = blobGasFeeCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
txInfo["original_blob_gas_fee_cap"] = originalBlobGasFeeCap.Uint64()
txInfo["adjusted_blob_gas_fee_cap"] = blobGasFeeCap.Uint64()
}

// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}

// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}

if originalGasTipCap.Cmp(gasTipCap) == 0 {
log.Warn("gas tip cap bump corner case, add 1 wei", "original", originalGasTipCap.Uint64(), "adjusted", gasTipCap.Uint64())
gasTipCap = new(big.Int).Add(gasTipCap, big.NewInt(1))
}

if originalGasFeeCap.Cmp(gasFeeCap) == 0 {
log.Warn("gas fee cap bump corner case, add 1 wei", "original", originalGasFeeCap.Uint64(), "adjusted", gasFeeCap.Uint64())
gasFeeCap = new(big.Int).Add(gasFeeCap, big.NewInt(1))
}

feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()

case BlobTxType:
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
originalBlobGasFeeCap := tx.BlobGasFeeCap()

// bumping at least 100%
gasTipCap := new(big.Int).Mul(originalGasTipCap, big.NewInt(2))
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, big.NewInt(2))
blobGasFeeCap := new(big.Int).Mul(originalBlobGasFeeCap, big.NewInt(2))

// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}

// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}

// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}

// adjust for rising blobbasefee
currentBlobGasFeeCap := getBlobGasFeeCap(new(big.Int).SetUint64(blobBaseFee))
if blobGasFeeCap.Cmp(currentBlobGasFeeCap) < 0 {
blobGasFeeCap = currentBlobGasFeeCap
}

// but don't exceed maxBlobGasPrice
if blobGasFeeCap.Cmp(maxBlobGasPrice) > 0 {
blobGasFeeCap = maxBlobGasPrice
}

feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
feeData.blobGasFeeCap = blobGasFeeCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
txInfo["original_blob_gas_fee_cap"] = originalBlobGasFeeCap.Uint64()
txInfo["adjusted_blob_gas_fee_cap"] = blobGasFeeCap.Uint64()

default:
return nil, fmt.Errorf("unsupported transaction type: %s", s.config.TxType)
}

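Numerically, the hunk above applies two different escalation rules: a configurable num/den bump (for example 110/100) for plain dynamic-fee transactions, and a flat doubling for blob transactions, both floored by the current base fee and capped by the configured maximum. A standalone worked example of the integer-division corner case (illustration only; constants and the helper name are simplified stand-ins):

package main

import (
	"fmt"
	"math/big"
)

// bump scales old by num/den in integer arithmetic, then applies the same
// corner case as the diff: if integer division left the value unchanged,
// add 1 wei so the replacement fee is strictly higher.
func bump(old *big.Int, num, den int64) *big.Int {
	v := new(big.Int).Mul(old, big.NewInt(num))
	v.Div(v, big.NewInt(den))
	if v.Cmp(old) == 0 {
		v.Add(v, big.NewInt(1))
	}
	return v
}

func main() {
	// With EscalateMultipleNum/Den = 110/100, a 9 wei tip rounds back down
	// to 9 (9*110/100 = 9 in integer math); the corner case bumps it to 10
	// so the node does not reject the replacement as underpriced.
	fmt.Println(bump(big.NewInt(9), 110, 100))   // 10
	fmt.Println(bump(big.NewInt(100), 110, 100)) // 110
	// Blob transactions instead double their caps outright ("bumping at
	// least 100%"), reflecting the much larger minimum bump nodes require
	// for blob-transaction replacement.
	fmt.Println(new(big.Int).Mul(big.NewInt(100), big.NewInt(2))) // 200
}
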
||||
@@ -26,9 +26,8 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
dockercompose "scroll-tech/common/docker-compose/l1"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
@@ -38,33 +37,27 @@ import (
|
||||
"scroll-tech/rollup/mock_bridge"
|
||||
)
|
||||
|
||||
const TXBatch = 50
|
||||
|
||||
var (
|
||||
privateKey *ecdsa.PrivateKey
|
||||
cfg *config.Config
|
||||
base *docker.App
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
posL1TestEnv *dockercompose.PoSL1TestEnv
|
||||
txTypes = []string{"LegacyTx", "DynamicFeeTx", "BlobTx"}
|
||||
txTypes = []string{"LegacyTx", "DynamicFeeTx", "DynamicFeeTx"}
|
||||
txBlob = []*kzg4844.Blob{nil, nil, randBlob()}
|
||||
txUint8Types = []uint8{0, 2, 3}
|
||||
db *gorm.DB
|
||||
testContractsAddress common.Address
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
base = docker.NewDockerApp()
|
||||
defer base.Free()
|
||||
|
||||
var err error
|
||||
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
|
||||
if err != nil {
|
||||
log.Crit("failed to create PoS L1 test environment", "err", err)
|
||||
}
|
||||
if err := posL1TestEnv.Start(); err != nil {
|
||||
log.Crit("failed to start PoS L1 test environment", "err", err)
|
||||
}
|
||||
defer posL1TestEnv.Stop()
|
||||
|
||||
defer func() {
|
||||
if testApps != nil {
|
||||
testApps.Free()
|
||||
}
|
||||
if posL1TestEnv != nil {
|
||||
posL1TestEnv.Stop()
|
||||
}
|
||||
}()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
@@ -80,17 +73,18 @@ func setupEnv(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
privateKey = priv
|
||||
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
|
||||
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
|
||||
assert.NoError(t, err, "failed to create PoS L1 test environment")
|
||||
assert.NoError(t, posL1TestEnv.Start(), "failed to start PoS L1 test environment")
|
||||
|
||||
base.RunDBImage(t)
|
||||
db, err = database.InitDB(
|
||||
&database.Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
},
|
||||
)
|
||||
testApps = testcontainers.NewTestcontainerApps()
|
||||
assert.NoError(t, testApps.StartPostgresContainer())
|
||||
assert.NoError(t, testApps.StartL1GethContainer())
|
||||
assert.NoError(t, testApps.StartL2GethContainer())
|
||||
|
||||
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
|
||||
|
||||
db, err = testApps.GetGormDBClient()
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
@@ -110,7 +104,7 @@ func setupEnv(t *testing.T) {
|
||||
|
||||
testContractsAddress = crypto.CreateAddress(auth.From, nonce)
|
||||
|
||||
tx := gethTypes.NewContractCreation(nonce, big.NewInt(0), 1000000, big.NewInt(10000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
|
||||
tx := gethTypes.NewContractCreation(nonce, big.NewInt(0), 10000000, big.NewInt(10000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
|
||||
signedTx, err := auth.Signer(auth.From, tx)
|
||||
assert.NoError(t, err)
|
||||
err = l1Client.SendTransaction(context.Background(), signedTx)
|
||||
@@ -158,14 +152,14 @@ func testNewSender(t *testing.T) {
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
// exit by Stop()
|
||||
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy1 := *cfg.L2Config.RelayerConfig.SenderConfig
|
||||
cfgCopy1.TxType = txType
|
||||
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
newSender1.Stop()
|
||||
|
||||
// exit by ctx.Done()
|
||||
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy2 := *cfg.L2Config.RelayerConfig.SenderConfig
|
||||
cfgCopy2.TxType = txType
|
||||
subCtx, cancel := context.WithCancel(context.Background())
|
||||
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
@@ -180,12 +174,12 @@ func testSendAndRetrieveTransaction(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
hash, err := s.SendTransaction("0", &common.Address{}, nil, randBlob(), 0)
|
||||
hash, err := s.SendTransaction("0", &common.Address{}, nil, txBlob[i], 0)
|
||||
assert.NoError(t, err)
|
||||
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
assert.NoError(t, err)
|
||||
@@ -210,12 +204,12 @@ func testSendAndRetrieveTransaction(t *testing.T) {
|
||||
}
|
||||
|
||||
func testFallbackGasLimit(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
for i, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
@@ -225,7 +219,7 @@ func testFallbackGasLimit(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// FallbackGasLimit = 0
|
||||
txHash0, err := s.SendTransaction("0", &common.Address{}, nil, randBlob(), 0)
|
||||
txHash0, err := s.SendTransaction("0", &common.Address{}, nil, txBlob[i], 0)
|
||||
assert.NoError(t, err)
|
||||
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
|
||||
assert.NoError(t, err)
|
||||
@@ -245,7 +239,7 @@ func testFallbackGasLimit(t *testing.T) {
|
||||
},
|
||||
)
|
||||
|
||||
txHash1, err := s.SendTransaction("1", &common.Address{}, nil, randBlob(), 100000)
|
||||
txHash1, err := s.SendTransaction("1", &common.Address{}, nil, txBlob[i], 100000)
|
||||
assert.NoError(t, err)
|
||||
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
|
||||
assert.NoError(t, err)
|
||||
@@ -263,8 +257,8 @@ func testFallbackGasLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
func testResubmitZeroGasPriceTransaction(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
if txType == BlobTxType {
|
||||
for i, txType := range txTypes {
|
||||
if txBlob[i] != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -272,7 +266,7 @@ func testResubmitZeroGasPriceTransaction(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
@@ -312,7 +306,7 @@ func testAccessListTransactionGasLimit(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
@@ -323,16 +317,20 @@ func testAccessListTransactionGasLimit(t *testing.T) {
|
||||
data, err := l2GasOracleABI.Pack("setL2BaseFee", big.NewInt(int64(i+1)))
|
||||
assert.NoError(t, err)
|
||||
|
||||
sidecar, err := makeSidecar(randBlob())
|
||||
assert.NoError(t, err)
|
||||
var sidecar *gethTypes.BlobTxSidecar
|
||||
if txBlob[i] != nil {
|
||||
sidecar, err = makeSidecar(txBlob[i])
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
gasLimit, accessList, err := s.estimateGasLimit(&testContractsAddress, data, sidecar, nil, big.NewInt(1000000000), big.NewInt(1000000000), big.NewInt(1000000000))
|
||||
assert.NoError(t, err)
|
||||
|
||||
if txType == LegacyTxType { // Legacy transactions can not have an access list.
|
||||
assert.Equal(t, uint64(43935), gasLimit)
|
||||
assert.Equal(t, uint64(43956), gasLimit)
|
||||
assert.Nil(t, accessList)
|
||||
} else { // Dynamic fee and blob transactions can have an access list.
|
||||
assert.Equal(t, uint64(43458), gasLimit)
|
||||
assert.Equal(t, uint64(43479), gasLimit)
|
||||
assert.NotNil(t, accessList)
|
||||
}
|
||||
|
||||
@@ -341,12 +339,12 @@ func testAccessListTransactionGasLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
for i, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
|
||||
// Bump gas price, gas tip cap and gas fee cap just touch the minimum threshold of 10% (default config of geth).
|
||||
cfgCopy.EscalateMultipleNum = 110
|
||||
cfgCopy.EscalateMultipleDen = 100
|
||||
@@ -360,8 +358,11 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
|
||||
blobGasFeeCap: big.NewInt(1000000000),
|
||||
gasLimit: 50000,
|
||||
}
|
||||
sidecar, err := makeSidecar(randBlob())
|
||||
assert.NoError(t, err)
|
||||
var sidecar *gethTypes.BlobTxSidecar
|
||||
if txBlob[i] != nil {
|
||||
sidecar, err = makeSidecar(txBlob[i])
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
tx, err := s.createAndSendTx(feeData, &common.Address{}, nil, sidecar, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, tx)
|
||||
@@ -383,8 +384,8 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
|
||||
}
|
||||
|
||||
func testResubmitUnderpricedTransaction(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
	if txType == BlobTxType {
	for i, txType := range txTypes {
		if txBlob[i] != nil {
			continue
		}

@@ -392,7 +393,7 @@ func testResubmitUnderpricedTransaction(t *testing.T) {
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))

	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
	// Bump gas price, gas tip cap and gas fee cap less than 10% (default config of geth).
	cfgCopy.EscalateMultipleNum = 109
	cfgCopy.EscalateMultipleDen = 100
@@ -431,7 +432,7 @@ func testResubmitDynamicFeeTransactionWithRisingBaseFee(t *testing.T) {
	assert.NoError(t, migrate.ResetDB(sqlDB))

	txType := "DynamicFeeTx"
	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
	cfgCopy.TxType = txType

	s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
@@ -473,9 +474,8 @@ func testResubmitBlobTransactionWithRisingBaseFeeAndBlobBaseFee(t *testing.T) {
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))

	txType := "BlobTx"
	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy.TxType = txType
	cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
	cfgCopy.TxType = DynamicFeeTxType

	s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
	assert.NoError(t, err)
@@ -531,7 +531,7 @@ func testCheckPendingTransactionTxConfirmed(t *testing.T) {
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))

	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
	cfgCopy.TxType = txType
	s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeCommitBatch, db, nil)
	assert.NoError(t, err)
@@ -572,7 +572,7 @@ func testCheckPendingTransactionResubmitTxConfirmed(t *testing.T) {
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))

	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
	cfgCopy.TxType = txType
	cfgCopy.EscalateBlocks = 0
	s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeFinalizeBatch, db, nil)
@@ -632,7 +632,7 @@ func testCheckPendingTransactionReplacedTxConfirmed(t *testing.T) {
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))

	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
	cfgCopy.TxType = txType
	cfgCopy.EscalateBlocks = 0
	s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeL1GasOracle, db, nil)
@@ -702,7 +702,7 @@ func testCheckPendingTransactionTxMultipleTimesWithOnlyOneTxPending(t *testing.T
	assert.NoError(t, err)
	assert.NoError(t, migrate.ResetDB(sqlDB))

	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
	cfgCopy.TxType = txType
	cfgCopy.EscalateBlocks = 0
	s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeCommitBatch, db, nil)
@@ -777,8 +777,8 @@ func testBlobTransactionWithBlobhashOpContractCall(t *testing.T) {
	)
	assert.NoError(t, err)

	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
	cfgCopy.TxType = BlobTxType
	cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
	cfgCopy.TxType = DynamicFeeTxType
	s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeL1GasOracle, db, nil)
	assert.NoError(t, err)
	defer s.Stop()

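Across these sender tests the pattern is the same: dereference the shared SenderConfig into a fresh copy, mutate the copy, and hand it to NewSender, so per-test tweaks (tx type, escalation knobs) cannot leak into other tests. A minimal sketch of the idiom, with a hypothetical Config type standing in for the repo's SenderConfig:

package main

import "fmt"

// Config is a hypothetical stand-in for the repo's SenderConfig.
type Config struct {
	TxType         string
	EscalateBlocks uint64
}

func main() {
	shared := &Config{TxType: "DynamicFeeTx", EscalateBlocks: 100}

	cfgCopy := *shared         // struct copy: cfgCopy is independent of *shared
	cfgCopy.EscalateBlocks = 0 // mutate only the copy, as the tests above do

	fmt.Println(shared.EscalateBlocks, cfgCopy.EscalateBlocks) // 100 0
}

Note that this is a shallow copy; it isolates the tests only because the mutated fields are plain values.
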
@@ -3,6 +3,7 @@ package watcher
import (
	"context"
	"fmt"
	"math/big"
	"time"

	"github.com/prometheus/client_golang/prometheus"
@@ -14,10 +15,10 @@ import (

	"scroll-tech/common/forks"
	"scroll-tech/common/types/encoding"
	"scroll-tech/common/types/encoding/codecv0"

	"scroll-tech/rollup/internal/config"
	"scroll-tech/rollup/internal/orm"
	"scroll-tech/rollup/internal/utils"
)

// BatchProposer proposes batches based on available unbatched chunks.
@@ -36,12 +37,15 @@ type BatchProposer struct {
	gasCostIncreaseMultiplier float64
	forkMap                   map[uint64]bool

	chainCfg *params.ChainConfig

	batchProposerCircleTotal           prometheus.Counter
	proposeBatchFailureTotal           prometheus.Counter
	proposeBatchUpdateInfoTotal        prometheus.Counter
	proposeBatchUpdateInfoFailureTotal prometheus.Counter
	totalL1CommitGas                   prometheus.Gauge
	totalL1CommitCalldataSize          prometheus.Gauge
	totalL1CommitBlobSize              prometheus.Gauge
	batchChunksNum                     prometheus.Gauge
	batchFirstBlockTimeoutReached      prometheus.Counter
	batchChunksProposeNotEnoughTotal   prometheus.Counter
@@ -58,7 +62,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
		"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
		"forkHeights", forkHeights)

	return &BatchProposer{
	p := &BatchProposer{
		ctx:      ctx,
		db:       db,
		batchOrm: orm.NewBatch(db),
@@ -70,6 +74,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
		batchTimeoutSec:           cfg.BatchTimeoutSec,
		gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
		forkMap:                   forkMap,
		chainCfg:                  chainCfg,

		batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "rollup_propose_batch_circle_total",
@@ -95,6 +100,10 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
			Name: "rollup_propose_batch_total_l1_call_data_size",
			Help: "The total l1 call data size",
		}),
		totalL1CommitBlobSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "rollup_propose_batch_total_l1_commit_blob_size",
			Help: "The total l1 commit blob size",
		}),
		batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "rollup_propose_batch_chunks_number",
			Help: "The number of chunks in the batch",
@@ -108,22 +117,23 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
			Help: "Total number of batch chunk propose not enough",
		}),
	}

	return p
}

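The constructor now binds the proposer to p before returning, making room for the newly registered blob-size gauge alongside the existing metrics. For reference, promauto.With registers a collector at construction time; a self-contained sketch against a throwaway registry (the registry and the Set value are illustrative, not the repo's wiring):

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()
	// promauto.With(reg) registers the gauge on creation, mirroring how
	// totalL1CommitBlobSize is wired up in NewBatchProposer above.
	blobSize := promauto.With(reg).NewGauge(prometheus.GaugeOpts{
		Name: "rollup_propose_batch_total_l1_commit_blob_size",
		Help: "The total l1 commit blob size",
	})
	blobSize.Set(4096) // e.g. bytes used by the current batch's blob
}
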
// TryProposeBatch tries to propose a new batch.
func (p *BatchProposer) TryProposeBatch() {
	p.batchProposerCircleTotal.Inc()
	batch, err := p.proposeBatch()
	if err != nil {
	if err := p.proposeBatch(); err != nil {
		p.proposeBatchFailureTotal.Inc()
		log.Error("proposeBatchChunks failed", "err", err)
		return
	}
	if batch == nil {
		return
	}
	err = p.db.Transaction(func(dbTX *gorm.DB) error {
		batch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, dbTX)
}

func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion encoding.CodecVersion) error {
	err := p.db.Transaction(func(dbTX *gorm.DB) error {
		batch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, codecVersion, dbTX)
		if dbErr != nil {
			log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure",
				"start chunk index", batch.StartChunkIndex, "end chunk index", batch.EndChunkIndex, "error", dbErr)
@@ -140,22 +150,23 @@ func (p *BatchProposer) TryProposeBatch() {
		p.proposeBatchUpdateInfoFailureTotal.Inc()
		log.Error("update batch info in db failed", "err", err)
	}
	return nil
}

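With this refactor, TryProposeBatch no longer receives a batch to persist; proposeBatch builds the batch and hands it straight to updateDBBatchInfo, which wraps the writes in one gorm transaction so a failed write rolls everything back. A rough sketch of that shape, with hypothetical callback names (the real method also updates the batched chunks' batch hash, which is elided in this hunk):

package sketch

import "gorm.io/gorm"

// persistBatch is a hypothetical helper mirroring updateDBBatchInfo's structure:
// either every write commits, or the whole proposal is rolled back.
func persistBatch(db *gorm.DB, insertBatch, linkChunks func(tx *gorm.DB) error) error {
	return db.Transaction(func(tx *gorm.DB) error {
		if err := insertBatch(tx); err != nil {
			return err // returning an error aborts and rolls back the transaction
		}
		return linkChunks(tx) // e.g. point the batched chunks at the new batch hash
	})
}
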
func (p *BatchProposer) proposeBatch() (*encoding.Batch, error) {
func (p *BatchProposer) proposeBatch() error {
	unbatchedChunkIndex, err := p.batchOrm.GetFirstUnbatchedChunkIndex(p.ctx)
	if err != nil {
		return nil, err
		return err
	}

	// select at most p.maxChunkNumPerBatch chunks
	dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch))
	if err != nil {
		return nil, err
		return err
	}

	if len(dbChunks) == 0 {
		return nil, nil
		return nil
	}

	maxChunksThisBatch := p.maxChunkNumPerBatch
@@ -170,124 +181,89 @@ func (p *BatchProposer) proposeBatch() (*encoding.Batch, error) {
		}
	}

	daChunks, err := p.getDAChunks(dbChunks)
	if err != nil {
		return nil, err
	codecVersion := encoding.CodecV0
	if p.chainCfg.IsBernoulli(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) {
		codecVersion = encoding.CodecV1
	}

	parentDBBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
	daChunks, err := p.getDAChunks(dbChunks)
	if err != nil {
		return nil, err
		return err
	}

	dbParentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
	if err != nil {
		return err
	}

	var batch encoding.Batch
	if parentDBBatch != nil {
		batch.Index = parentDBBatch.Index + 1
		parentDABatch, err := codecv0.NewDABatchFromBytes(parentDBBatch.BatchHeader)
		if err != nil {
			return nil, err
		}
		batch.TotalL1MessagePoppedBefore = parentDABatch.TotalL1MessagePopped
		batch.ParentBatchHash = common.HexToHash(parentDBBatch.Hash)
	batch.Index = dbParentBatch.Index + 1
	batch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash)
	parentBatchEndBlockNumber := daChunks[0].Blocks[0].Header.Number.Uint64() - 1
	parentBatchCodecVersion := encoding.CodecV0
	// The genesis batch uses codecv0 encoding; otherwise the bernoulli fork decides the codec version.
	if dbParentBatch.Index > 0 && p.chainCfg.IsBernoulli(new(big.Int).SetUint64(parentBatchEndBlockNumber)) {
		parentBatchCodecVersion = encoding.CodecV1
	}
	batch.TotalL1MessagePoppedBefore, err = utils.GetTotalL1MessagePoppedBeforeBatch(dbParentBatch.BatchHeader, parentBatchCodecVersion)
	if err != nil {
		return err
	}

	for i, chunk := range daChunks {
		batch.Chunks = append(batch.Chunks, chunk)
		totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
		if err != nil {
			return nil, err
		metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion)
		if calcErr != nil {
			return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
		}
		totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
		if err != nil {
			return nil, err
		}
		totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
		if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
			totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
			// Check if the first chunk breaks hard limits.
			// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
		totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
		if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
			totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
			metrics.L1CommitBlobSize > maxBlobSize {
			if i == 0 {
				if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
					return nil, fmt.Errorf(
						"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
						dbChunks[0].StartBlockNumber,
						dbChunks[0].EndBlockNumber,
						totalL1CommitGas,
						p.maxL1CommitGasPerBatch,
					)
				}
				if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
					return nil, fmt.Errorf(
						"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
						dbChunks[0].StartBlockNumber,
						dbChunks[0].EndBlockNumber,
						totalL1CommitCalldataSize,
						p.maxL1CommitCalldataSizePerBatch,
					)
				}
				// The first chunk exceeds hard limits, which indicates a bug in the chunk-proposer; a manual fix is needed.
				return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v",
					dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, p.maxChunkNumPerBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize)
			}

			log.Debug("breaking limit condition in batching",
				"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
				"currentL1CommitCalldataSize", metrics.L1CommitCalldataSize,
				"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
				"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
				"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)

			batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]

			totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
			metrics, err := utils.CalculateBatchMetrics(&batch, codecVersion)
			if err != nil {
				return nil, err
				return fmt.Errorf("failed to calculate batch metrics: %w", err)
			}

			totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
			if err != nil {
				return nil, err
			}

			p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
			p.totalL1CommitGas.Set(float64(totalL1CommitGas))
			p.batchChunksNum.Set(float64(batch.NumChunks()))
			return &batch, nil
			p.recordBatchMetrics(metrics)
			return p.updateDBBatchInfo(&batch, codecVersion)
		}
	}

	metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion)
	if calcErr != nil {
		return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
	}
	currentTimeSec := uint64(time.Now().Unix())
	if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec ||
		batch.NumChunks() == maxChunksThisBatch {
		if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
			log.Warn("first block timeout",
				"start block number", dbChunks[0].StartBlockNumber,
				"start block timestamp", dbChunks[0].StartBlockTime,
				"current time", currentTimeSec,
			)
		} else {
			log.Info("reached maximum number of chunks in batch",
				"chunk count", batch.NumChunks(),
			)
		}

		totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
		if err != nil {
			return nil, err
		}

		totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
		if err != nil {
			return nil, err
		}
	if metrics.FirstBlockTimestamp+p.batchTimeoutSec < currentTimeSec || metrics.NumChunks == maxChunksThisBatch {
		log.Info("reached maximum number of chunks in batch or first block timeout",
			"chunk count", metrics.NumChunks,
			"start block number", dbChunks[0].StartBlockNumber,
			"start block timestamp", dbChunks[0].StartBlockTime,
			"current time", currentTimeSec)

		p.batchFirstBlockTimeoutReached.Inc()
		p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
		p.totalL1CommitGas.Set(float64(totalL1CommitGas))
		p.batchChunksNum.Set(float64(batch.NumChunks()))

		return &batch, nil
		p.recordBatchMetrics(metrics)
		return p.updateDBBatchInfo(&batch, codecVersion)
	}

	log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
	p.batchChunksProposeNotEnoughTotal.Inc()
	return nil, nil
	return nil
}

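One detail worth pausing on: the proposer rejects on an over-estimate, scaling the measured commit gas by gasCostIncreaseMultiplier before comparing against the cap, so a batch is cut slightly before the raw estimate would hit the limit. A worked example of that arithmetic with illustrative numbers (the constants below are not the repo's configured values):

package main

import "fmt"

func main() {
	const gasCostIncreaseMultiplier = 1.2 // same shape as p.gasCostIncreaseMultiplier
	const maxL1CommitGasPerBatch = uint64(5_000_000)

	estimatedGas := uint64(4_500_000) // e.g. metrics.L1CommitGas
	overEstimate := uint64(gasCostIncreaseMultiplier * float64(estimatedGas))
	fmt.Println(overEstimate)                          // 5400000
	fmt.Println(overEstimate > maxL1CommitGasPerBatch) // true: a 4.5M raw estimate already trips the 5M cap
}
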
func (p *BatchProposer) getDAChunks(dbChunks []*orm.Chunk) ([]*encoding.Chunk, error) {
@@ -304,3 +280,10 @@ func (p *BatchProposer) getDAChunks(dbChunks []*orm.Chunk) ([]*encoding.Chunk, e
	}
	return chunks, nil
}

func (p *BatchProposer) recordBatchMetrics(metrics *utils.BatchMetrics) {
	p.totalL1CommitGas.Set(float64(metrics.L1CommitGas))
	p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
	p.batchChunksNum.Set(float64(metrics.NumChunks))
	p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
}

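recordBatchMetrics funnels all four gauges through a single utils.BatchMetrics value, so both codec paths report the same way. utils.CalculateBatchMetrics itself is not shown in this diff; what follows is a speculative sketch of the codec-dispatching shape it suggests (field names are taken from their uses above, everything else is assumed):

package sketch

import (
	"fmt"

	"scroll-tech/common/types/encoding"
)

// BatchMetrics mirrors the fields used by recordBatchMetrics above.
type BatchMetrics struct {
	NumChunks            uint64
	FirstBlockTimestamp  uint64
	L1CommitGas          uint64
	L1CommitCalldataSize uint64
	L1CommitBlobSize     uint64
}

// calculateBatchMetrics is a hypothetical stand-in for utils.CalculateBatchMetrics:
// it dispatches on the codec version so callers never touch codecv0/codecv1 directly.
func calculateBatchMetrics(batch *encoding.Batch, v encoding.CodecVersion) (*BatchMetrics, error) {
	m := &BatchMetrics{NumChunks: uint64(len(batch.Chunks))}
	switch v {
	case encoding.CodecV0:
		// pre-Bernoulli: calldata-based estimation would fill L1CommitGas / L1CommitCalldataSize
	case encoding.CodecV1:
		// post-Bernoulli: blob-based estimation would also fill L1CommitBlobSize
	default:
		return nil, fmt.Errorf("unsupported codec version: %v", v)
	}
	return m, nil
}
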
@@ -2,9 +2,12 @@ package watcher

import (
	"context"
	"math"
	"math/big"
	"testing"

	"github.com/scroll-tech/go-ethereum/common"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/params"
	"github.com/stretchr/testify/assert"

@@ -16,7 +19,7 @@ import (
	"scroll-tech/rollup/internal/orm"
)

func testBatchProposerLimits(t *testing.T) {
func testBatchProposerCodecv0Limits(t *testing.T) {
	tests := []struct {
		name        string
		maxChunkNum uint64
@@ -104,8 +107,31 @@ func testBatchProposerLimits(t *testing.T) {
			db := setupDB(t)
			defer database.CloseDB(db)

			// Add genesis batch.
			block := &encoding.Block{
				Header: &gethTypes.Header{
					Number: big.NewInt(0),
				},
				RowConsumption: &gethTypes.RowConsumption{},
			}
			chunk := &encoding.Chunk{
				Blocks: []*encoding.Block{block},
			}
			chunkOrm := orm.NewChunk(db)
			_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
			assert.NoError(t, err)
			batch := &encoding.Batch{
				Index:                      0,
				TotalL1MessagePoppedBefore: 0,
				ParentBatchHash:            common.Hash{},
				Chunks:                     []*encoding.Chunk{chunk},
			}
			batchOrm := orm.NewBatch(db)
			_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
			assert.NoError(t, err)

			l2BlockOrm := orm.NewL2Block(db)
			err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
			err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
			assert.NoError(t, err)

			cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -122,8 +148,7 @@ func testBatchProposerLimits(t *testing.T) {
			cp.TryProposeChunk() // chunk1 contains block1
			cp.TryProposeChunk() // chunk2 contains block2

			chunkOrm := orm.NewChunk(db)
			chunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
			chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
			assert.NoError(t, err)
			assert.Equal(t, uint64(6042), chunks[0].TotalL1CommitGas)
			assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize)
@@ -141,17 +166,142 @@ func testBatchProposerLimits(t *testing.T) {
			}, db, nil)
			bp.TryProposeBatch()

			batchOrm := orm.NewBatch(db)
			batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
			assert.NoError(t, err)
			assert.Len(t, batches, tt.expectedBatchesLen)
			assert.Len(t, batches, tt.expectedBatchesLen+1)
			batches = batches[1:]
			if tt.expectedBatchesLen > 0 {
				assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
				assert.Equal(t, tt.expectedChunksInFirstBatch-1, batches[0].EndChunkIndex)
				assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
				assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
				assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
				assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

				dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, tt.expectedChunksInFirstBatch-1)
				dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
				assert.NoError(t, err)
				assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
				for _, chunk := range dbChunks {
					assert.Equal(t, batches[0].Hash, chunk.BatchHash)
					assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
				}
			}
		})
	}
}

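With a genesis batch pre-seeded, every index in the assertions shifts by one: batch 0 and chunk 0 are now the genesis entries, so the first proposed batch starts at chunk index 1 and the fetched batch list is trimmed with batches[1:]. A small, self-contained illustration of the shift, with values picked to match the Timeout case where expectedChunksInFirstBatch is 2:

package main

import "fmt"

func main() {
	// Hypothetical illustration: chunk index ranges with and without a seeded genesis chunk.
	expectedChunksInFirstBatch := uint64(2)
	withoutGenesisStart, withoutGenesisEnd := uint64(0), expectedChunksInFirstBatch-1 // old assertions: [0, 1]
	withGenesisStart, withGenesisEnd := uint64(1), expectedChunksInFirstBatch         // new assertions: [1, 2]
	fmt.Println(withoutGenesisStart, withoutGenesisEnd, withGenesisStart, withGenesisEnd)
}
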
func testBatchProposerCodecv1Limits(t *testing.T) {
	tests := []struct {
		name                       string
		maxChunkNum                uint64
		batchTimeoutSec            uint64
		forkBlock                  *big.Int
		expectedBatchesLen         int
		expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
	}{
		{
			name:               "NoLimitReached",
			maxChunkNum:        10,
			batchTimeoutSec:    1000000000000,
			expectedBatchesLen: 0,
		},
		{
			name:                       "Timeout",
			maxChunkNum:                10,
			batchTimeoutSec:            0,
			expectedBatchesLen:         1,
			expectedChunksInFirstBatch: 2,
		},
		{
			name:                       "MaxChunkNumPerBatchIs1",
			maxChunkNum:                1,
			batchTimeoutSec:            1000000000000,
			expectedBatchesLen:         1,
			expectedChunksInFirstBatch: 1,
		},
		{
			name:                       "ForkBlockReached",
			maxChunkNum:                10,
			batchTimeoutSec:            1000000000000,
			expectedBatchesLen:         1,
			expectedChunksInFirstBatch: 1,
			forkBlock:                  big.NewInt(3),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			db := setupDB(t)
			defer database.CloseDB(db)

			// Add genesis batch.
			block := &encoding.Block{
				Header: &gethTypes.Header{
					Number: big.NewInt(0),
				},
				RowConsumption: &gethTypes.RowConsumption{},
			}
			chunk := &encoding.Chunk{
				Blocks: []*encoding.Block{block},
			}
			chunkOrm := orm.NewChunk(db)
			_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV1)
			assert.NoError(t, err)
			batch := &encoding.Batch{
				Index:                      0,
				TotalL1MessagePoppedBefore: 0,
				ParentBatchHash:            common.Hash{},
				Chunks:                     []*encoding.Chunk{chunk},
			}
			batchOrm := orm.NewBatch(db)
			_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV1)
			assert.NoError(t, err)

			l2BlockOrm := orm.NewL2Block(db)
			err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
			assert.NoError(t, err)

			cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
				MaxBlockNumPerChunk:             1,
				MaxTxNumPerChunk:                10000,
				MaxL1CommitGasPerChunk:          1,
				MaxL1CommitCalldataSizePerChunk: 1,
				MaxRowConsumptionPerChunk:       1000000,
				ChunkTimeoutSec:                 300,
				GasCostIncreaseMultiplier:       1.2,
			}, &params.ChainConfig{
				BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
			}, db, nil)
			cp.TryProposeChunk() // chunk1 contains block1
			cp.TryProposeChunk() // chunk2 contains block2

			chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
			assert.NoError(t, err)
			assert.Equal(t, uint64(0), chunks[0].TotalL1CommitGas)
			assert.Equal(t, uint64(0), chunks[0].TotalL1CommitCalldataSize)
			assert.Equal(t, uint64(0), chunks[1].TotalL1CommitGas)
			assert.Equal(t, uint64(0), chunks[1].TotalL1CommitCalldataSize)

			bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
				MaxChunkNumPerBatch:             tt.maxChunkNum,
				MaxL1CommitGasPerBatch:          1,
				MaxL1CommitCalldataSizePerBatch: 1,
				BatchTimeoutSec:                 tt.batchTimeoutSec,
				GasCostIncreaseMultiplier:       1.2,
			}, &params.ChainConfig{
				BernoulliBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
			}, db, nil)
			bp.TryProposeBatch()

			batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
			assert.NoError(t, err)
			assert.Len(t, batches, tt.expectedBatchesLen+1)
			batches = batches[1:]
			if tt.expectedBatchesLen > 0 {
				assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
				assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
				assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
				assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

				dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
				assert.NoError(t, err)
				assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
				for _, chunk := range dbChunks {
@@ -167,8 +317,31 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)

	// Add genesis batch.
	block := &encoding.Block{
		Header: &gethTypes.Header{
			Number: big.NewInt(0),
		},
		RowConsumption: &gethTypes.RowConsumption{},
	}
	chunk := &encoding.Chunk{
		Blocks: []*encoding.Block{block},
	}
	chunkOrm := orm.NewChunk(db)
	_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
	assert.NoError(t, err)
	batch := &encoding.Batch{
		Index:                      0,
		TotalL1MessagePoppedBefore: 0,
		ParentBatchHash:            common.Hash{},
		Chunks:                     []*encoding.Chunk{chunk},
	}
	batchOrm := orm.NewBatch(db)
	_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
	assert.NoError(t, err)

	l2BlockOrm := orm.NewL2Block(db)
	err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
	assert.NoError(t, err)

	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -183,8 +356,7 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
	cp.TryProposeChunk() // chunk1 contains block1
	cp.TryProposeChunk() // chunk2 contains block2

	chunkOrm := orm.NewChunk(db)
	chunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
	chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
	assert.NoError(t, err)
	assert.Equal(t, uint64(6042), chunks[0].TotalL1CommitGas)
	assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize)
@@ -200,16 +372,16 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
	}, &params.ChainConfig{}, db, nil)
	bp.TryProposeBatch()

	batchOrm := orm.NewBatch(db)
	batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
	assert.NoError(t, err)
	assert.Len(t, batches, 1)
	assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
	assert.Equal(t, uint64(1), batches[0].EndChunkIndex)
	assert.Len(t, batches, 2)
	batches = batches[1:]
	assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
	assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
	assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
	assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

	dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
	dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
	assert.NoError(t, err)
	assert.Len(t, dbChunks, 2)
	for _, chunk := range dbChunks {
@@ -220,3 +392,76 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
	assert.Equal(t, uint64(258383), batches[0].TotalL1CommitGas)
	assert.Equal(t, uint64(6035), batches[0].TotalL1CommitCalldataSize)
}

func testBatchProposerBlobSizeLimit(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)

	// Add genesis batch.
	block := &encoding.Block{
		Header: &gethTypes.Header{
			Number: big.NewInt(0),
		},
		RowConsumption: &gethTypes.RowConsumption{},
	}
	chunk := &encoding.Chunk{
		Blocks: []*encoding.Block{block},
	}
	chunkOrm := orm.NewChunk(db)
	_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV1)
	assert.NoError(t, err)
	batch := &encoding.Batch{
		Index:                      0,
		TotalL1MessagePoppedBefore: 0,
		ParentBatchHash:            common.Hash{},
		Chunks:                     []*encoding.Chunk{chunk},
	}
	batchOrm := orm.NewBatch(db)
	_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV1)
	assert.NoError(t, err)

	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:             math.MaxUint64,
		MaxTxNumPerChunk:                math.MaxUint64,
		MaxL1CommitGasPerChunk:          1,
		MaxL1CommitCalldataSizePerChunk: 1,
		MaxRowConsumptionPerChunk:       math.MaxUint64,
		ChunkTimeoutSec:                 math.MaxUint64,
		GasCostIncreaseMultiplier:       1,
	}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)

	block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
	for total := int64(0); total < 7; total++ {
		for i := int64(0); i < 10; i++ {
			l2BlockOrm := orm.NewL2Block(db)
			block.Header.Number = big.NewInt(total*10 + i + 1)
			err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
			assert.NoError(t, err)
		}
		cp.TryProposeChunk()
	}

	bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		MaxChunkNumPerBatch:             math.MaxUint64,
		MaxL1CommitGasPerBatch:          1,
		MaxL1CommitCalldataSizePerBatch: 1,
		BatchTimeoutSec:                 math.MaxUint64,
		GasCostIncreaseMultiplier:       1,
	}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)

	for i := 0; i < 10; i++ {
		bp.TryProposeBatch()
	}

	batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
	batches = batches[1:]
	assert.NoError(t, err)
	assert.Len(t, batches, 4)
	for i, batch := range batches {
		expected := uint64(2 * (i + 1))
		if expected > 7 {
			expected = 7
		}
		assert.Equal(t, expected, batch.EndChunkIndex)
	}
}

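The expected EndChunkIndex sequence follows from the setup arithmetic: 70 blocks are inserted as 7 chunks of 10 blocks each, and the blob-size cap admits roughly two of these chunks per batch, so successive batches end at chunk 2, 4, 6, and finally 7. The snippet below reproduces that expectation in isolation (the two-chunks-per-batch figure is taken from the assertions above, not derived from blob math here):

package main

import "fmt"

func main() {
	// 7 chunks total; assume the blob limit admits 2 chunks per batch,
	// matching the assertions in testBatchProposerBlobSizeLimit above.
	const totalChunks = 7
	for i := 0; i < 4; i++ {
		expected := uint64(2 * (i + 1))
		if expected > totalChunks {
			expected = totalChunks
		}
		fmt.Println(expected) // 2, 4, 6, 7
	}
}
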
Some files were not shown because too many files have changed in this diff.