mirror of
https://github.com/scroll-tech/scroll.git
synced 2026-04-23 03:00:50 -04:00
use disk root
This commit is contained in:
@@ -73,7 +73,7 @@ func action(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
// Init l2geth connection
|
||||
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
|
||||
l2client, err := rpc.Dial(cfg.L2Config.Endpoint)
|
||||
if err != nil {
|
||||
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
@@ -68,7 +69,7 @@ func action(ctx *cli.Context) error {
|
||||
observability.Server(ctx, db)
|
||||
|
||||
// Init l2geth connection
|
||||
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
|
||||
l2Client, err := rpc.Dial(cfg.L2Config.Endpoint)
|
||||
if err != nil {
|
||||
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
|
||||
}
|
||||
@@ -86,7 +87,7 @@ func action(ctx *cli.Context) error {
|
||||
log.Crit("cfg.L2Config.BatchProposerConfig.MaxChunksPerBatch must be greater than 0")
|
||||
}
|
||||
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2Client, db, cfg.L2Config.RelayerConfig, genesis.Config, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
|
||||
if err != nil {
|
||||
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
@@ -96,11 +97,11 @@ func action(ctx *cli.Context) error {
|
||||
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, registry)
|
||||
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)
|
||||
|
||||
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)
|
||||
l2watcher := watcher.NewL2WatcherClient(subCtx, l2Client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry)
|
||||
|
||||
// Watcher loop to fetch missing blocks
|
||||
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
|
||||
number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations)
|
||||
number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, ethclient.NewClient(l2Client), cfg.L2Config.Confirmations)
|
||||
if loopErr != nil {
|
||||
log.Error("failed to get block number", "err", loopErr)
|
||||
return
|
||||
|
||||
@@ -1,172 +0,0 @@
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/rollup/internal/controller/sender"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
func setupL1RelayerDB(t *testing.T) *gorm.DB {
|
||||
db, err := database.InitDB(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
return db
|
||||
}
|
||||
|
||||
// testCreateNewL1Relayer test create new relayer instance and stop
|
||||
func testCreateNewL1Relayer(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, ServiceTypeL1GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
defer relayer.StopSenders()
|
||||
}
|
||||
|
||||
func testL1RelayerGasOracleConfirm(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
l1BlockOrm := orm.NewL1Block(db)
|
||||
|
||||
l1Block := []orm.L1Block{
|
||||
{Hash: "gas-oracle-1", Number: 0, GasOracleStatus: int16(types.GasOraclePending)},
|
||||
{Hash: "gas-oracle-2", Number: 1, GasOracleStatus: int16(types.GasOraclePending)},
|
||||
}
|
||||
// Insert test data.
|
||||
assert.NoError(t, l1BlockOrm.InsertL1Blocks(context.Background(), l1Block))
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, ServiceTypeL1GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l1Relayer.StopSenders()
|
||||
|
||||
// Simulate message confirmations.
|
||||
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
|
||||
ContextID: "gas-oracle-1",
|
||||
IsSuccessful: true,
|
||||
SenderType: types.SenderTypeL1GasOracle,
|
||||
})
|
||||
l1Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
|
||||
ContextID: "gas-oracle-2",
|
||||
IsSuccessful: false,
|
||||
SenderType: types.SenderTypeL1GasOracle,
|
||||
})
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
msg1, err1 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-1"})
|
||||
msg2, err2 := l1BlockOrm.GetL1Blocks(ctx, map[string]interface{}{"hash": "gas-oracle-2"})
|
||||
return err1 == nil && len(msg1) == 1 && types.GasOracleStatus(msg1[0].GasOracleStatus) == types.GasOracleImported &&
|
||||
err2 == nil && len(msg2) == 1 && types.GasOracleStatus(msg2[0].GasOracleStatus) == types.GasOracleImportedFailed
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func testL1RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, ServiceTypeL1GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, l1Relayer)
|
||||
defer l1Relayer.StopSenders()
|
||||
|
||||
var l1BlockOrm *orm.L1Block
|
||||
convey.Convey("GetLatestL1BlockHeight failure", t, func() {
|
||||
targetErr := errors.New("GetLatestL1BlockHeight error")
|
||||
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
|
||||
return 0, targetErr
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
l1Relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard := gomonkey.ApplyMethodFunc(l1BlockOrm, "GetLatestL1BlockHeight", func(ctx context.Context) (uint64, error) {
|
||||
return 100, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
convey.Convey("GetL1Blocks failure", t, func() {
|
||||
targetErr := errors.New("GetL1Blocks error")
|
||||
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
|
||||
return nil, targetErr
|
||||
})
|
||||
l1Relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
convey.Convey("Block not exist", t, func() {
|
||||
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
|
||||
tmpInfo := []orm.L1Block{
|
||||
{Hash: "gas-oracle-1", Number: 0},
|
||||
{Hash: "gas-oracle-2", Number: 1},
|
||||
}
|
||||
return tmpInfo, nil
|
||||
})
|
||||
l1Relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(l1BlockOrm, "GetL1Blocks", func(ctx context.Context, fields map[string]interface{}) ([]orm.L1Block, error) {
|
||||
tmpInfo := []orm.L1Block{
|
||||
{
|
||||
Hash: "gas-oracle-1",
|
||||
Number: 0,
|
||||
GasOracleStatus: int16(types.GasOraclePending),
|
||||
},
|
||||
}
|
||||
return tmpInfo, nil
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(l1Relayer.l1GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
|
||||
return []byte("for test"), nil
|
||||
})
|
||||
|
||||
convey.Convey("send transaction failure", t, func() {
|
||||
targetErr := errors.New("send transaction failure")
|
||||
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, []byte, *kzg4844.Blob, uint64) (hash common.Hash, err error) {
|
||||
return common.Hash{}, targetErr
|
||||
})
|
||||
l1Relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, []byte, *kzg4844.Blob, uint64) (hash common.Hash, err error) {
|
||||
return common.Hash{}, nil
|
||||
})
|
||||
|
||||
convey.Convey("UpdateL1GasOracleStatusAndOracleTxHash failure", t, func() {
|
||||
targetErr := errors.New("UpdateL1GasOracleStatusAndOracleTxHash failure")
|
||||
patchGuard.ApplyMethodFunc(l1BlockOrm, "UpdateL1GasOracleStatusAndOracleTxHash", func(context.Context, string, types.GasOracleStatus, string) error {
|
||||
return targetErr
|
||||
})
|
||||
l1Relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(l1BlockOrm, "UpdateL1GasOracleStatusAndOracleTxHash", func(context.Context, string, types.GasOracleStatus, string) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
l1Relayer.ProcessGasPriceOracle()
|
||||
}
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
@@ -39,7 +40,8 @@ import (
|
||||
type Layer2Relayer struct {
|
||||
ctx context.Context
|
||||
|
||||
l2Client *ethclient.Client
|
||||
l2RpcClient *rpc.Client
|
||||
l2Client *ethclient.Client
|
||||
|
||||
db *gorm.DB
|
||||
bundleOrm *orm.Bundle
|
||||
@@ -69,7 +71,7 @@ type Layer2Relayer struct {
|
||||
}
|
||||
|
||||
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *rpc.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
|
||||
var gasOracleSender, commitSender, finalizeSender *sender.Sender
|
||||
var err error
|
||||
|
||||
@@ -136,7 +138,8 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
|
||||
l2Client: l2Client,
|
||||
l2RpcClient: l2Client,
|
||||
l2Client: ethclient.NewClient(l2Client),
|
||||
|
||||
commitSender: commitSender,
|
||||
finalizeSender: finalizeSender,
|
||||
@@ -196,20 +199,20 @@ func (r *Layer2Relayer) initializeGenesis() error {
|
||||
return fmt.Errorf("failed to get batch by index: %v, err: %w", startFinalizedBatchIndex, err)
|
||||
}
|
||||
|
||||
endBlockNumber, err := r.chunkOrm.GetChunkByIndex(r.ctx, startFinalizedBatch.EndChunkIndex)
|
||||
endChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, startFinalizedBatch.EndChunkIndex)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get chunk by index: %v, err: %w", startFinalizedBatch.EndChunkIndex, err)
|
||||
}
|
||||
|
||||
header, err := r.l2Client.HeaderByNumber(r.ctx, new(big.Int).SetUint64(endBlockNumber.EndBlockNumber))
|
||||
diskRoot, err := rutils.GetDiskRoot(r.ctx, r.l2RpcClient, endChunk.EndBlockNumber)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get block by number: %v, err: %w", endBlockNumber.EndBlockNumber, err)
|
||||
return fmt.Errorf("failed to get disk root, block number: %v", endChunk.EndBlockNumber)
|
||||
}
|
||||
|
||||
if err = r.commitGenesisBatch(startFinalizedBatch.Hash, startFinalizedBatch.BatchHeader, header.Root); err != nil {
|
||||
if err = r.commitGenesisBatch(startFinalizedBatch.Hash, startFinalizedBatch.BatchHeader, diskRoot); err != nil {
|
||||
return fmt.Errorf("commit genesis batch failed: %v", err)
|
||||
}
|
||||
log.Info("import genesis transaction successfully", "batch index", startFinalizedBatchIndex, "batch hash", startFinalizedBatch.Hash, "end block number", endBlockNumber.EndBlockNumber, "header root", header.Root.Hex())
|
||||
log.Info("import genesis transaction successfully", "batch index", startFinalizedBatchIndex, "batch hash", startFinalizedBatch.Hash, "end block number", endChunk.EndBlockNumber, "header root", diskRoot)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,570 +0,0 @@
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/rollup/internal/controller/sender"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
rutils "scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
func setupL2RelayerDB(t *testing.T) *gorm.DB {
|
||||
db, err := database.InitDB(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
return db
|
||||
}
|
||||
|
||||
func testCreateNewRelayer(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, ¶ms.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
defer relayer.StopSenders()
|
||||
}
|
||||
|
||||
func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
} else {
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBatches()
|
||||
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupCommitting, statuses[0])
|
||||
relayer.StopSenders()
|
||||
patchGuard.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func testL2RelayerProcessPendingBundles(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
bundleOrm := orm.NewBundle(db)
|
||||
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = bundleOrm.UpdateRollupStatus(context.Background(), bundle.Hash, types.RollupPending)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = bundleOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBundles()
|
||||
|
||||
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(bundles))
|
||||
// no valid proof, rollup status remains the same
|
||||
assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus))
|
||||
|
||||
proof := &message.Halo2BundleProof{
|
||||
RawProof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
}
|
||||
err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, proof, types.ProvingTaskVerified, 600)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBundles()
|
||||
bundles, err = bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(bundles))
|
||||
assert.Equal(t, types.RollupFinalizing, types.RollupStatus(bundles[0].RollupStatus))
|
||||
relayer.StopSenders()
|
||||
}
|
||||
}
|
||||
|
||||
func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
|
||||
l2Cfg.RelayerConfig.FinalizeBundleWithoutProofTimeoutSec = 0
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
bundleOrm := orm.NewBundle(db)
|
||||
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
relayer.ProcessPendingBundles()
|
||||
|
||||
bundleInDB, bundleErr := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
|
||||
if bundleErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
bundleStatus := len(bundleInDB) == 1 && types.RollupStatus(bundleInDB[0].RollupStatus) == types.RollupFinalizing &&
|
||||
types.ProvingStatus(bundleInDB[0].ProvingStatus) == types.ProvingTaskVerified
|
||||
|
||||
batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
|
||||
if batchErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
batchStatus := len(batchInDB) == 1 && types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified
|
||||
|
||||
chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
|
||||
if chunkErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
|
||||
types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified
|
||||
|
||||
return bundleStatus && batchStatus && chunkStatus
|
||||
}, 5*time.Second, 100*time.Millisecond, "Bundle or Batch or Chunk status did not update as expected")
|
||||
relayer.StopSenders()
|
||||
}
|
||||
}
|
||||
|
||||
func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
// Simulate message confirmations.
|
||||
isSuccessful := []bool{true, false}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batchHashes := make([]string, len(isSuccessful))
|
||||
for i := range batchHashes {
|
||||
batch := &encoding.Batch{
|
||||
Index: uint64(i + 1),
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batchHashes[i] = dbBatch.Hash
|
||||
}
|
||||
|
||||
for i, batchHash := range batchHashes {
|
||||
l2Relayer.commitSender.SendConfirmation(&sender.Confirmation{
|
||||
ContextID: batchHash,
|
||||
IsSuccessful: isSuccessful[i],
|
||||
TxHash: common.HexToHash("0x123456789abcdef"),
|
||||
SenderType: types.SenderTypeCommitBatch,
|
||||
})
|
||||
}
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
expectedStatuses := []types.RollupStatus{
|
||||
types.RollupCommitted,
|
||||
types.RollupCommitFailed,
|
||||
}
|
||||
|
||||
for i, batchHash := range batchHashes {
|
||||
batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHash}, nil, 0)
|
||||
if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
// Simulate message confirmations.
|
||||
isSuccessful := []bool{true, false}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
bundleOrm := orm.NewBundle(db)
|
||||
batchHashes := make([]string, len(isSuccessful))
|
||||
bundleHashes := make([]string, len(isSuccessful))
|
||||
for i := range batchHashes {
|
||||
batch := &encoding.Batch{
|
||||
Index: uint64(i + 1),
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batchHashes[i] = dbBatch.Hash
|
||||
|
||||
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV4)
|
||||
assert.NoError(t, err)
|
||||
bundleHashes[i] = bundle.Hash
|
||||
|
||||
err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
for i, bundleHash := range bundleHashes {
|
||||
l2Relayer.finalizeSender.SendConfirmation(&sender.Confirmation{
|
||||
ContextID: "finalizeBundle-" + bundleHash,
|
||||
IsSuccessful: isSuccessful[i],
|
||||
TxHash: common.HexToHash("0x123456789abcdef"),
|
||||
SenderType: types.SenderTypeFinalizeBatch,
|
||||
})
|
||||
}
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
expectedStatuses := []types.RollupStatus{
|
||||
types.RollupFinalized,
|
||||
types.RollupFinalizeFailed,
|
||||
}
|
||||
|
||||
for i, bundleHash := range bundleHashes {
|
||||
bundleInDB, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundleHash}, nil, 0)
|
||||
if err != nil || len(bundleInDB) != 1 || types.RollupStatus(bundleInDB[0].RollupStatus) != expectedStatuses[i] {
|
||||
return false
|
||||
}
|
||||
|
||||
batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHashes[i]}, nil, 0)
|
||||
if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}, 5*time.Second, 100*time.Millisecond, "Bundle or Batch status did not update as expected")
|
||||
}
|
||||
|
||||
func testL2RelayerGasOracleConfirm(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
batch1 := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch2 := &encoding.Batch{
|
||||
Index: batch1.Index + 1,
|
||||
TotalL1MessagePoppedBefore: batch1.TotalL1MessagePoppedBefore,
|
||||
ParentBatchHash: common.HexToHash(dbBatch1.Hash),
|
||||
Chunks: []*encoding.Chunk{chunk2},
|
||||
}
|
||||
|
||||
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
// Simulate message confirmations.
|
||||
type BatchConfirmation struct {
|
||||
batchHash string
|
||||
isSuccessful bool
|
||||
}
|
||||
|
||||
confirmations := []BatchConfirmation{
|
||||
{batchHash: dbBatch1.Hash, isSuccessful: true},
|
||||
{batchHash: dbBatch2.Hash, isSuccessful: false},
|
||||
}
|
||||
|
||||
for _, confirmation := range confirmations {
|
||||
l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
|
||||
ContextID: confirmation.batchHash,
|
||||
IsSuccessful: confirmation.isSuccessful,
|
||||
SenderType: types.SenderTypeL2GasOracle,
|
||||
})
|
||||
}
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleImportedFailed}
|
||||
for i, confirmation := range confirmations {
|
||||
gasOracle, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": confirmation.batchHash}, nil, 0)
|
||||
if err != nil || len(gasOracle) != 1 || types.GasOracleStatus(gasOracle[0].OracleStatus) != expectedStatuses[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
// testLayer2RelayerProcessGasPriceOracle drives ProcessGasPriceOracle through
// every failure branch by monkey-patching its collaborators one at a time.
// NOTE: the patches are strictly order-dependent — each ApplyMethodFunc call
// replaces the previous stub for the same method, so later sections rely on
// the "happy path" stubs installed by the sections before them.
func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
	db := setupL2RelayerDB(t)
	defer database.CloseDB(db)

	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
	assert.NoError(t, err)
	assert.NotNil(t, relayer)
	defer relayer.StopSenders()

	// A nil *orm.Batch is only used as the receiver argument for gomonkey's
	// method patching; the value itself is presumably never dereferenced —
	// the stubs below replace the method bodies entirely.
	var batchOrm *orm.Batch
	convey.Convey("Failed to GetLatestBatch", t, func() {
		targetErr := errors.New("GetLatestBatch error")
		patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
			return nil, targetErr
		})
		defer patchGuard.Reset()
		relayer.ProcessGasPriceOracle()
	})

	// From here on GetLatestBatch always succeeds with a pending batch so the
	// later failure branches become reachable.
	patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
		batch := orm.Batch{
			OracleStatus: int16(types.GasOraclePending),
			Hash:         "0x0000000000000000000000000000000000000000",
		}
		return &batch, nil
	})
	defer patchGuard.Reset()

	convey.Convey("Failed to fetch SuggestGasPrice from l2geth", t, func() {
		targetErr := errors.New("SuggestGasPrice error")
		patchGuard.ApplyMethodFunc(relayer.l2Client, "SuggestGasPrice", func(ctx context.Context) (*big.Int, error) {
			return nil, targetErr
		})
		relayer.ProcessGasPriceOracle()
	})

	// Happy-path gas price for the remaining sections.
	patchGuard.ApplyMethodFunc(relayer.l2Client, "SuggestGasPrice", func(ctx context.Context) (*big.Int, error) {
		return big.NewInt(100), nil
	})

	convey.Convey("Failed to pack setL2BaseFee", t, func() {
		targetErr := errors.New("setL2BaseFee error")
		patchGuard.ApplyMethodFunc(relayer.l2GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
			return nil, targetErr
		})
		relayer.ProcessGasPriceOracle()
	})

	// Pack succeeds (the calldata content is irrelevant for these branches).
	patchGuard.ApplyMethodFunc(relayer.l2GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
		return nil, nil
	})

	convey.Convey("Failed to send setL2BaseFee tx to layer2", t, func() {
		targetErr := errors.New("failed to send setL2BaseFee tx to layer2 error")
		patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, data []byte, blob *kzg4844.Blob, fallbackGasLimit uint64) (hash common.Hash, err error) {
			return common.Hash{}, targetErr
		})
		relayer.ProcessGasPriceOracle()
	})

	// SendTransaction succeeds with an arbitrary tx hash.
	patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, data []byte, blob *kzg4844.Blob, fallbackGasLimit uint64) (hash common.Hash, err error) {
		return common.HexToHash("0x56789abcdef1234"), nil
	})

	convey.Convey("UpdateGasOracleStatusAndOracleTxHash failed", t, func() {
		targetErr := errors.New("UpdateL2GasOracleStatusAndOracleTxHash error")
		patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
			return targetErr
		})
		relayer.ProcessGasPriceOracle()
	})

	// Fully-stubbed happy path: the final call should complete without
	// touching any real dependency.
	patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
		return nil
	})
	relayer.ProcessGasPriceOracle()
}
|
||||
|
||||
func mockChainMonitorServer(baseURL string) (*http.Server, error) {
|
||||
router := gin.New()
|
||||
r := router.Group("/v1")
|
||||
r.GET("/batch_status", func(ctx *gin.Context) {
|
||||
ctx.JSON(http.StatusOK, struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data bool `json:"data"`
|
||||
}{
|
||||
ErrCode: 0,
|
||||
ErrMsg: "",
|
||||
Data: true,
|
||||
})
|
||||
})
|
||||
return utils.StartHTTPServer(strings.Split(baseURL, "//")[1], router)
|
||||
}
|
||||
|
||||
func testGetBatchStatusByIndex(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, ¶ms.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
defer relayer.StopSenders()
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV0, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV0, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
status, err := relayer.getBatchStatusByIndex(dbBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, status)
|
||||
}
|
||||
@@ -1,136 +0,0 @@
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"math/big"
|
||||
"os"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/testcontainers"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
)
|
||||
|
||||
// Shared fixtures for the relayer test suite; populated once by setupEnv and
// read by the individual test cases that TestFunctions runs.
var (
	// config loaded from conf/config.json and patched with container endpoints.
	cfg *config.Config

	// testApps manages the dockerized dependencies (postgres, l2geth, PoS L1).
	testApps *testcontainers.TestcontainerApps
	// l2geth client
	l2Cli *ethclient.Client

	// l2 blocks decoded from the testdata block traces.
	block1 *encoding.Block
	block2 *encoding.Block

	// chunks wrapping block1/block2, plus their DA-chunk hashes.
	chunk1     *encoding.Chunk
	chunk2     *encoding.Chunk
	chunkHash1 common.Hash
	chunkHash2 common.Hash
)
|
||||
|
||||
// setupEnv prepares the shared test fixtures: logging, config, the docker
// containers (postgres, l2geth, PoS L1), the l2geth client, and two
// block/chunk fixtures decoded from testdata block traces.
func setupEnv(t *testing.T) {
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	// Load config.
	var err error
	cfg, err = config.NewConfig("../../../conf/config.json")
	assert.NoError(t, err)

	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())
	assert.NoError(t, testApps.StartL2GethContainer())
	assert.NoError(t, testApps.StartPoSL1Container())

	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetPoSL1EndPoint()
	assert.NoError(t, err)
	// NOTE(review): the L1 relayer's sender is pointed at the L2 geth
	// container here — presumably a deliberate stand-in chain for the tests,
	// but worth confirming.
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
	assert.NoError(t, err)

	dsn, err := testApps.GetDBEndPoint()
	assert.NoError(t, err)
	cfg.DBConfig = &database.Config{
		DSN:        dsn,
		DriverName: "postgres",
		MaxOpenNum: 200,
		MaxIdleNum: 20,
	}
	// Pick a random port in [50000, 60000) for the mock chain-monitor server
	// so parallel runs are unlikely to collide.
	port, err := rand.Int(rand.Reader, big.NewInt(10000))
	assert.NoError(t, err)
	svrPort := strconv.FormatInt(port.Int64()+50000, 10)
	cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort

	// Create l2geth client.
	l2Cli, err = testApps.GetL2GethClient()
	assert.NoError(t, err)

	// Fixture #1: block1/chunk1 from blockTrace_02.json, hashed as a CodecV0
	// DA chunk with no prior L1 messages.
	templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
	assert.NoError(t, err)
	block1 = &encoding.Block{}
	err = json.Unmarshal(templateBlockTrace1, block1)
	assert.NoError(t, err)
	chunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}}
	codec, err := encoding.CodecFromVersion(encoding.CodecV0)
	assert.NoError(t, err)
	daChunk1, err := codec.NewDAChunk(chunk1, 0)
	assert.NoError(t, err)
	chunkHash1, err = daChunk1.Hash()
	assert.NoError(t, err)

	// Fixture #2: block2/chunk2 from blockTrace_03.json; its DA chunk
	// accounts for any L1 messages consumed by chunk1.
	templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
	assert.NoError(t, err)
	block2 = &encoding.Block{}
	err = json.Unmarshal(templateBlockTrace2, block2)
	assert.NoError(t, err)
	chunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}}
	daChunk2, err := codec.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
	assert.NoError(t, err)
	chunkHash2, err = daChunk2.Hash()
	assert.NoError(t, err)
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
defer func() {
|
||||
if testApps != nil {
|
||||
testApps.Free()
|
||||
}
|
||||
}()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
func TestFunctions(t *testing.T) {
|
||||
setupEnv(t)
|
||||
srv, err := mockChainMonitorServer(cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL)
|
||||
assert.NoError(t, err)
|
||||
defer srv.Close()
|
||||
|
||||
// Run l1 relayer test cases.
|
||||
t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
|
||||
t.Run("TestL1RelayerGasOracleConfirm", testL1RelayerGasOracleConfirm)
|
||||
t.Run("TestL1RelayerProcessGasPriceOracle", testL1RelayerProcessGasPriceOracle)
|
||||
|
||||
// Run l2 relayer test cases.
|
||||
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
|
||||
t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
|
||||
t.Run("TestL2RelayerProcessPendingBundles", testL2RelayerProcessPendingBundles)
|
||||
t.Run("TestL2RelayerFinalizeTimeoutBundles", testL2RelayerFinalizeTimeoutBundles)
|
||||
t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
|
||||
t.Run("TestL2RelayerFinalizeBundleConfirm", testL2RelayerFinalizeBundleConfirm)
|
||||
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
|
||||
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
|
||||
|
||||
// test getBatchStatusByIndex
|
||||
t.Run("TestGetBatchStatusByIndex", testGetBatchStatusByIndex)
|
||||
}
|
||||
@@ -1,425 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
// testBatchProposerLimitsCodecV4 exercises the batch proposer's limit knobs
// (L1 commit gas, calldata size, timeout) against two pre-proposed chunks and
// checks how many batches result and how many chunks land in the first one.
func testBatchProposerLimitsCodecV4(t *testing.T) {
	tests := []struct {
		name                       string
		maxL1CommitGas             uint64
		maxL1CommitCalldataSize    uint64
		batchTimeoutSec            uint64
		expectedBatchesLen         int
		expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
	}{
		{
			name:                    "NoLimitReached",
			maxL1CommitGas:          50000000000,
			maxL1CommitCalldataSize: 1000000,
			batchTimeoutSec:         1000000000000,
			expectedBatchesLen:      0,
		},
		{
			name:                       "Timeout",
			maxL1CommitGas:             50000000000,
			maxL1CommitCalldataSize:    1000000,
			batchTimeoutSec:            0,
			expectedBatchesLen:         1,
			expectedChunksInFirstBatch: 2,
		},
		{
			name:                    "MaxL1CommitGasPerBatchIs0",
			maxL1CommitGas:          0,
			maxL1CommitCalldataSize: 1000000,
			batchTimeoutSec:         1000000000000,
			expectedBatchesLen:      0,
		},
		{
			name:                    "MaxL1CommitCalldataSizePerBatchIs0",
			maxL1CommitGas:          50000000000,
			maxL1CommitCalldataSize: 0,
			batchTimeoutSec:         1000000000000,
			expectedBatchesLen:      0,
		},
		{
			// Gas cap tuned so only the first chunk fits (each chunk's
			// estimate is the 51124 asserted below).
			name:                       "MaxL1CommitGasPerBatchIsFirstChunk",
			maxL1CommitGas:             249179,
			maxL1CommitCalldataSize:    1000000,
			batchTimeoutSec:            1000000000000,
			expectedBatchesLen:         1,
			expectedChunksInFirstBatch: 1,
		},
		{
			// Calldata cap of 60 bytes fits exactly one chunk.
			name:                       "MaxL1CommitCalldataSizePerBatchIsFirstChunk",
			maxL1CommitGas:             50000000000,
			maxL1CommitCalldataSize:    60,
			batchTimeoutSec:            1000000000000,
			expectedBatchesLen:         1,
			expectedChunksInFirstBatch: 1,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			db := setupDB(t)
			defer database.CloseDB(db)

			// Add genesis batch.
			block := &encoding.Block{
				Header: &gethTypes.Header{
					Number: big.NewInt(0),
				},
				RowConsumption: &gethTypes.RowConsumption{},
			}
			chunk := &encoding.Chunk{
				Blocks: []*encoding.Block{block},
			}
			chunkOrm := orm.NewChunk(db)
			_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
			assert.NoError(t, err)
			batch := &encoding.Batch{
				Index:                      0,
				TotalL1MessagePoppedBefore: 0,
				ParentBatchHash:            common.Hash{},
				Chunks:                     []*encoding.Chunk{chunk},
			}
			batchOrm := orm.NewBatch(db)
			_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
			assert.NoError(t, err)

			l2BlockOrm := orm.NewL2Block(db)
			err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
			assert.NoError(t, err)

			// Chunk proposer limits are permissive except
			// MaxBlockNumPerChunk=1, which forces one chunk per block.
			cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
				MaxBlockNumPerChunk:             1,
				MaxTxNumPerChunk:                10000,
				MaxL1CommitGasPerChunk:          50000000000,
				MaxL1CommitCalldataSizePerChunk: 1000000,
				MaxRowConsumptionPerChunk:       1000000,
				ChunkTimeoutSec:                 300,
				GasCostIncreaseMultiplier:       1.2,
				MaxUncompressedBatchBytesSize:   math.MaxUint64,
			}, encoding.CodecV4, &params.ChainConfig{
				LondonBlock:    big.NewInt(0),
				BernoulliBlock: big.NewInt(0),
				CurieBlock:     big.NewInt(0),
				DarwinTime:     new(uint64),
				DarwinV2Time:   new(uint64),
			}, db, nil)
			cp.TryProposeChunk() // chunk1 contains block1
			cp.TryProposeChunk() // chunk2 contains block2

			// Sanity-check the per-chunk commit estimates that the batch
			// proposer will aggregate against the configured limits.
			chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
			assert.NoError(t, err)
			assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas)
			assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
			assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas)
			assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

			bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
				MaxL1CommitGasPerBatch:          tt.maxL1CommitGas,
				MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
				BatchTimeoutSec:                 tt.batchTimeoutSec,
				GasCostIncreaseMultiplier:       1.2,
				MaxUncompressedBatchBytesSize:   math.MaxUint64,
				MaxChunksPerBatch:               math.MaxInt32,
			}, encoding.CodecV4, &params.ChainConfig{
				LondonBlock:    big.NewInt(0),
				BernoulliBlock: big.NewInt(0),
				CurieBlock:     big.NewInt(0),
				DarwinTime:     new(uint64),
				DarwinV2Time:   new(uint64),
			}, db, nil)
			bp.TryProposeBatch()

			// The first stored batch is genesis; skip it before asserting on
			// the proposer's output.
			batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
			assert.NoError(t, err)
			assert.Len(t, batches, tt.expectedBatchesLen+1)
			batches = batches[1:]
			if tt.expectedBatchesLen > 0 {
				assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
				assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
				assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
				assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

				// Every chunk folded into the batch must be linked back to it
				// and still await proving.
				dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
				assert.NoError(t, err)
				assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
				for _, chunk := range dbChunks {
					assert.Equal(t, batches[0].Hash, chunk.BatchHash)
					assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
				}
			}
		})
	}
}
|
||||
|
||||
// testBatchCommitGasAndCalldataSizeEstimationCodecV4 checks that the batch
// proposer's aggregate L1-commit gas and calldata estimates match the known
// values for a batch built from two single-block chunks.
func testBatchCommitGasAndCalldataSizeEstimationCodecV4(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)

	// Add genesis batch.
	block := &encoding.Block{
		Header: &gethTypes.Header{
			Number: big.NewInt(0),
		},
		RowConsumption: &gethTypes.RowConsumption{},
	}
	chunk := &encoding.Chunk{
		Blocks: []*encoding.Block{block},
	}
	chunkOrm := orm.NewChunk(db)
	_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
	assert.NoError(t, err)
	batch := &encoding.Batch{
		Index:                      0,
		TotalL1MessagePoppedBefore: 0,
		ParentBatchHash:            common.Hash{},
		Chunks:                     []*encoding.Chunk{chunk},
	}
	batchOrm := orm.NewBatch(db)
	_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
	assert.NoError(t, err)

	l2BlockOrm := orm.NewL2Block(db)
	err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
	assert.NoError(t, err)

	// MaxBlockNumPerChunk=1 forces one chunk per fixture block.
	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:             1,
		MaxTxNumPerChunk:                10000,
		MaxL1CommitGasPerChunk:          50000000000,
		MaxL1CommitCalldataSizePerChunk: 1000000,
		MaxRowConsumptionPerChunk:       1000000,
		ChunkTimeoutSec:                 300,
		GasCostIncreaseMultiplier:       1.2,
		MaxUncompressedBatchBytesSize:   math.MaxUint64,
	}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
	cp.TryProposeChunk() // chunk1 contains block1
	cp.TryProposeChunk() // chunk2 contains block2

	// Known per-chunk commit estimates for these fixture blocks.
	chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
	assert.NoError(t, err)
	assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas)
	assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
	assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas)
	assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

	// BatchTimeoutSec=0 forces an immediate batch containing both chunks.
	bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		MaxL1CommitGasPerBatch:          50000000000,
		MaxL1CommitCalldataSizePerBatch: 1000000,
		BatchTimeoutSec:                 0,
		GasCostIncreaseMultiplier:       1.2,
		MaxUncompressedBatchBytesSize:   math.MaxUint64,
		MaxChunksPerBatch:               math.MaxInt32,
	}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
	bp.TryProposeBatch()

	// Index 0 is the genesis batch; the proposed batch follows it.
	batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
	assert.NoError(t, err)
	assert.Len(t, batches, 2)
	batches = batches[1:]
	assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
	assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
	assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
	assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

	// Both chunks must be linked to the new batch and still await proving.
	dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
	assert.NoError(t, err)
	assert.Len(t, dbChunks, 2)
	for _, chunk := range dbChunks {
		assert.Equal(t, batches[0].Hash, chunk.BatchHash)
		assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
	}

	// Batch-level totals: the calldata is the plain sum (2 × 60); the gas
	// total (209350) includes batch-level overhead on top of the per-chunk
	// estimates.
	assert.Equal(t, uint64(209350), batches[0].TotalL1CommitGas)
	assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}
|
||||
|
||||
func testBatchProposerBlobSizeLimitCodecV4(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupDB(t)
|
||||
|
||||
// Add genesis batch.
|
||||
block := &encoding.Block{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(0),
|
||||
},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
}
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{block},
|
||||
}
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
} else {
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: math.MaxUint64,
|
||||
MaxTxNumPerChunk: math.MaxUint64,
|
||||
MaxL1CommitGasPerChunk: math.MaxUint64,
|
||||
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
|
||||
MaxRowConsumptionPerChunk: math.MaxUint64,
|
||||
ChunkTimeoutSec: 0,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
blockHeight := int64(0)
|
||||
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
for total := int64(0); total < 90; total++ {
|
||||
for i := int64(0); i < 30; i++ {
|
||||
blockHeight++
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
block.Header.Number = big.NewInt(blockHeight)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
cp.TryProposeChunk()
|
||||
}
|
||||
|
||||
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxL1CommitGasPerBatch: math.MaxUint64,
|
||||
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
|
||||
BatchTimeoutSec: math.MaxUint32,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
MaxChunksPerBatch: math.MaxInt32,
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
bp.TryProposeBatch()
|
||||
}
|
||||
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
batches = batches[1:]
|
||||
assert.NoError(t, err)
|
||||
|
||||
var expectedNumBatches int
|
||||
var numChunksMultiplier uint64
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
expectedNumBatches = 2
|
||||
numChunksMultiplier = 45
|
||||
} else {
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
assert.Len(t, batches, expectedNumBatches)
|
||||
|
||||
for i, batch := range batches {
|
||||
assert.Equal(t, numChunksMultiplier*(uint64(i)+1), batch.EndChunkIndex)
|
||||
}
|
||||
database.CloseDB(db)
|
||||
}
|
||||
}
|
||||
|
||||
// testBatchProposerMaxChunkNumPerBatchLimitCodecV4 proposes 60 single-block
// chunks and verifies that a single batch proposal absorbs at most the
// codec's chunk capacity (45 chunks for CodecV4).
func testBatchProposerMaxChunkNumPerBatchLimitCodecV4(t *testing.T) {
	codecVersions := []encoding.CodecVersion{encoding.CodecV4}
	for _, codecVersion := range codecVersions {
		db := setupDB(t)

		// Add genesis batch.
		block := &encoding.Block{
			Header: &gethTypes.Header{
				Number: big.NewInt(0),
			},
			RowConsumption: &gethTypes.RowConsumption{},
		}
		chunk := &encoding.Chunk{
			Blocks: []*encoding.Block{block},
		}
		chunkOrm := orm.NewChunk(db)
		_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
		assert.NoError(t, err)
		batch := &encoding.Batch{
			Index:                      0,
			TotalL1MessagePoppedBefore: 0,
			ParentBatchHash:            common.Hash{},
			Chunks:                     []*encoding.Chunk{chunk},
		}
		batchOrm := orm.NewBatch(db)
		_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
		assert.NoError(t, err)

		// Per-codec expectation for how many chunks one batch can hold.
		var expectedChunkNum uint64
		var chainConfig *params.ChainConfig
		if codecVersion == encoding.CodecV4 {
			chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
			expectedChunkNum = 45
		} else {
			assert.Fail(t, "unsupported codec version, expected CodecV4")
		}

		// Zero chunk timeout: each TryProposeChunk flushes the block just
		// inserted, yielding one chunk per block.
		cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
			MaxBlockNumPerChunk:             math.MaxUint64,
			MaxTxNumPerChunk:                math.MaxUint64,
			MaxL1CommitGasPerChunk:          math.MaxUint64,
			MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
			MaxRowConsumptionPerChunk:       math.MaxUint64,
			ChunkTimeoutSec:                 0,
			GasCostIncreaseMultiplier:       1,
			MaxUncompressedBatchBytesSize:   math.MaxUint64,
		}, encoding.CodecV4, chainConfig, db, nil)

		// Insert 60 clones of the fixture block at increasing heights,
		// proposing a chunk after each insert.
		block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
		for blockHeight := int64(1); blockHeight <= 60; blockHeight++ {
			block.Header.Number = big.NewInt(blockHeight)
			err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block})
			assert.NoError(t, err)
			cp.TryProposeChunk()
		}

		// All explicit batch limits unbounded so only the codec's chunk
		// capacity can cap the proposed batch.
		bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
			MaxL1CommitGasPerBatch:          math.MaxUint64,
			MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
			BatchTimeoutSec:                 math.MaxUint32,
			GasCostIncreaseMultiplier:       1,
			MaxUncompressedBatchBytesSize:   math.MaxUint64,
			MaxChunksPerBatch:               math.MaxInt32,
		}, encoding.CodecV4, chainConfig, db, nil)
		bp.TryProposeBatch()

		// batches[0] is genesis; batches[1] is the newly proposed batch.
		batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
		assert.NoError(t, err)
		assert.Len(t, batches, 2)
		dbBatch := batches[1]

		assert.Equal(t, expectedChunkNum, dbBatch.EndChunkIndex)

		database.CloseDB(db)
	}
}
|
||||
@@ -1,136 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
// testBundleProposerLimitsCodecV4 exercises the bundle proposer's limit knobs
// (max batches per bundle, bundle timeout) over two pre-built batches and
// checks how many bundles result and which batches the first bundle spans.
func testBundleProposerLimitsCodecV4(t *testing.T) {
	tests := []struct {
		name                         string
		maxBatchNumPerBundle         uint64
		bundleTimeoutSec             uint64
		expectedBundlesLen           int
		expectedBatchesInFirstBundle uint64 // only be checked when expectedBundlesLen > 0
	}{
		{
			name:                 "NoLimitReached",
			maxBatchNumPerBundle: math.MaxUint64,
			bundleTimeoutSec:     math.MaxUint32,
			expectedBundlesLen:   0,
		},
		{
			name:                         "Timeout",
			maxBatchNumPerBundle:         math.MaxUint64,
			bundleTimeoutSec:             0,
			expectedBundlesLen:           1,
			expectedBatchesInFirstBundle: 2,
		},
		{
			name:                 "maxBatchNumPerBundleIs0",
			maxBatchNumPerBundle: 0,
			bundleTimeoutSec:     math.MaxUint32,
			expectedBundlesLen:   0,
		},
		{
			name:                         "maxBatchNumPerBundleIs1",
			maxBatchNumPerBundle:         1,
			bundleTimeoutSec:             math.MaxUint32,
			expectedBundlesLen:           1,
			expectedBatchesInFirstBundle: 1,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			db := setupDB(t)
			defer database.CloseDB(db)

			// Add genesis batch.
			block := &encoding.Block{
				Header: &gethTypes.Header{
					Number: big.NewInt(0),
				},
				RowConsumption: &gethTypes.RowConsumption{},
			}
			chunk := &encoding.Chunk{
				Blocks: []*encoding.Block{block},
			}
			chunkOrm := orm.NewChunk(db)
			_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
			assert.NoError(t, err)
			batch := &encoding.Batch{
				Index:                      0,
				TotalL1MessagePoppedBefore: 0,
				ParentBatchHash:            common.Hash{},
				Chunks:                     []*encoding.Chunk{chunk},
			}
			batchOrm := orm.NewBatch(db)
			_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
			assert.NoError(t, err)

			l2BlockOrm := orm.NewL2Block(db)
			err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
			assert.NoError(t, err)

			chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}

			// MaxBlockNumPerChunk=1 forces one chunk per block.
			cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
				MaxBlockNumPerChunk:             1,
				MaxTxNumPerChunk:                math.MaxUint64,
				MaxL1CommitGasPerChunk:          math.MaxUint64,
				MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
				MaxRowConsumptionPerChunk:       math.MaxUint64,
				ChunkTimeoutSec:                 math.MaxUint32,
				GasCostIncreaseMultiplier:       1,
				MaxUncompressedBatchBytesSize:   math.MaxUint64,
			}, encoding.CodecV4, chainConfig, db, nil)

			// BatchTimeoutSec=0 so each proposal round turns the pending
			// chunk into its own batch.
			// NOTE(review): MaxChunksPerBatch is left unset (zero) here,
			// unlike the other batch-proposer tests — confirm the proposer
			// handles zero as intended in this path.
			bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
				MaxL1CommitGasPerBatch:          math.MaxUint64,
				MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
				BatchTimeoutSec:                 0,
				GasCostIncreaseMultiplier:       1,
				MaxUncompressedBatchBytesSize:   math.MaxUint64,
			}, encoding.CodecV4, chainConfig, db, nil)

			cp.TryProposeChunk()  // chunk1 contains block1
			bap.TryProposeBatch() // batch1 contains chunk1
			cp.TryProposeChunk()  // chunk2 contains block2
			bap.TryProposeBatch() // batch2 contains chunk2

			bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{
				MaxBatchNumPerBundle: tt.maxBatchNumPerBundle,
				BundleTimeoutSec:     tt.bundleTimeoutSec,
			}, encoding.CodecV4, chainConfig, db, nil)

			bup.TryProposeBundle()

			// Verify bundle count and, when a bundle was produced, that it
			// spans the expected batch range and starts in pending state.
			bundleOrm := orm.NewBundle(db)
			bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0)
			assert.NoError(t, err)
			assert.Len(t, bundles, tt.expectedBundlesLen)
			if tt.expectedBundlesLen > 0 {
				assert.Equal(t, uint64(1), bundles[0].StartBatchIndex)
				assert.Equal(t, tt.expectedBatchesInFirstBundle, bundles[0].EndBatchIndex)
				assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus))
				assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(bundles[0].ProvingStatus))
			}
		})
	}
}
|
||||
@@ -1,245 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common/math"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
// testChunkProposerLimitsCodecV4 drives the chunk proposer through a table of
// per-chunk limit configurations and checks how many chunks get proposed and
// how many blocks land in the first chunk under each limit.
func testChunkProposerLimitsCodecV4(t *testing.T) {
	tests := []struct {
		name                       string
		maxBlockNum                uint64
		maxTxNum                   uint64
		maxL1CommitGas             uint64
		maxL1CommitCalldataSize    uint64
		maxRowConsumption          uint64
		chunkTimeoutSec            uint64
		expectedChunksLen          int
		expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
	}{
		{
			name:                    "NoLimitReached",
			maxBlockNum:             100,
			maxTxNum:                10000,
			maxL1CommitGas:          50000000000,
			maxL1CommitCalldataSize: 1000000,
			maxRowConsumption:       1000000,
			chunkTimeoutSec:         1000000000000,
			expectedChunksLen:       0,
		},
		{
			name:                       "Timeout",
			maxBlockNum:                100,
			maxTxNum:                   10000,
			maxL1CommitGas:             50000000000,
			maxL1CommitCalldataSize:    1000000,
			maxRowConsumption:          1000000,
			chunkTimeoutSec:            0,
			expectedChunksLen:          1,
			expectedBlocksInFirstChunk: 2,
		},
		{
			name:                    "MaxTxNumPerChunkIs0",
			maxBlockNum:             10,
			maxTxNum:                0,
			maxL1CommitGas:          50000000000,
			maxL1CommitCalldataSize: 1000000,
			maxRowConsumption:       1000000,
			chunkTimeoutSec:         1000000000000,
			expectedChunksLen:       0,
		},
		{
			name:                    "MaxL1CommitGasPerChunkIs0",
			maxBlockNum:             10,
			maxTxNum:                10000,
			maxL1CommitGas:          0,
			maxL1CommitCalldataSize: 1000000,
			maxRowConsumption:       1000000,
			chunkTimeoutSec:         1000000000000,
			expectedChunksLen:       0,
		},
		{
			name:                    "MaxL1CommitCalldataSizePerChunkIs0",
			maxBlockNum:             10,
			maxTxNum:                10000,
			maxL1CommitGas:          50000000000,
			maxL1CommitCalldataSize: 0,
			maxRowConsumption:       1000000,
			chunkTimeoutSec:         1000000000000,
			expectedChunksLen:       0,
		},
		{
			name:                    "MaxRowConsumptionPerChunkIs0",
			maxBlockNum:             100,
			maxTxNum:                10000,
			maxL1CommitGas:          50000000000,
			maxL1CommitCalldataSize: 1000000,
			maxRowConsumption:       0,
			chunkTimeoutSec:         1000000000000,
			expectedChunksLen:       0,
		},
		{
			name:                       "MaxBlockNumPerChunkIs1",
			maxBlockNum:                1,
			maxTxNum:                   10000,
			maxL1CommitGas:             50000000000,
			maxL1CommitCalldataSize:    1000000,
			maxRowConsumption:          1000000,
			chunkTimeoutSec:            1000000000000,
			expectedChunksLen:          1,
			expectedBlocksInFirstChunk: 1,
		},
		{
			name:                       "MaxTxNumPerChunkIsFirstBlock",
			maxBlockNum:                10,
			maxTxNum:                   2,
			maxL1CommitGas:             50000000000,
			maxL1CommitCalldataSize:    1000000,
			maxRowConsumption:          1000000,
			chunkTimeoutSec:            1000000000000,
			expectedChunksLen:          1,
			expectedBlocksInFirstChunk: 1,
		},
		{
			name:                       "MaxL1CommitGasPerChunkIsFirstBlock",
			maxBlockNum:                10,
			maxTxNum:                   10000,
			maxL1CommitGas:             62500,
			maxL1CommitCalldataSize:    1000000,
			maxRowConsumption:          1000000,
			chunkTimeoutSec:            1000000000000,
			expectedChunksLen:          1,
			expectedBlocksInFirstChunk: 1,
		},
		{
			name:                       "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
			maxBlockNum:                10,
			maxTxNum:                   10000,
			maxL1CommitGas:             50000000000,
			maxL1CommitCalldataSize:    60,
			maxRowConsumption:          1000000,
			chunkTimeoutSec:            1000000000000,
			expectedChunksLen:          1,
			expectedBlocksInFirstChunk: 1,
		},
		{
			name:                       "MaxRowConsumptionPerChunkIs1",
			maxBlockNum:                10,
			maxTxNum:                   10000,
			maxL1CommitGas:             50000000000,
			maxL1CommitCalldataSize:    1000000,
			maxRowConsumption:          1,
			chunkTimeoutSec:            1000000000000,
			expectedChunksLen:          1,
			expectedBlocksInFirstChunk: 1,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Each case runs against a freshly reset database so the proposer
			// only ever sees the two fixture blocks inserted below.
			db := setupDB(t)
			defer database.CloseDB(db)

			l2BlockOrm := orm.NewL2Block(db)
			err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
			assert.NoError(t, err)

			cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
				MaxBlockNumPerChunk:             tt.maxBlockNum,
				MaxTxNumPerChunk:                tt.maxTxNum,
				MaxL1CommitGasPerChunk:          tt.maxL1CommitGas,
				MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
				MaxRowConsumptionPerChunk:       tt.maxRowConsumption,
				ChunkTimeoutSec:                 tt.chunkTimeoutSec,
				GasCostIncreaseMultiplier:       1.2,
				MaxUncompressedBatchBytesSize:   math.MaxUint64,
			}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
			cp.TryProposeChunk()

			chunkOrm := orm.NewChunk(db)
			chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
			assert.NoError(t, err)
			assert.Len(t, chunks, tt.expectedChunksLen)

			if len(chunks) > 0 {
				// Every block assigned to the first chunk must carry the same
				// chunk hash as the proposed chunk row.
				blockOrm := orm.NewL2Block(db)
				chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk)
				assert.NoError(t, err)
				assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk)
				firstChunkHash := chunks[0].Hash
				for _, chunkHash := range chunkHashes {
					assert.Equal(t, firstChunkHash, chunkHash)
				}
			}
		})
	}
}
|
||||
|
||||
func testChunkProposerBlobSizeLimitCodecV4(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupDB(t)
|
||||
block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
for i := int64(0); i < 510; i++ {
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
block.Header.Number = big.NewInt(i + 1)
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
} else {
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: 255,
|
||||
MaxTxNumPerChunk: math.MaxUint64,
|
||||
MaxL1CommitGasPerChunk: math.MaxUint64,
|
||||
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
|
||||
MaxRowConsumptionPerChunk: math.MaxUint64,
|
||||
ChunkTimeoutSec: math.MaxUint32,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
MaxUncompressedBatchBytesSize: math.MaxUint64,
|
||||
}, encoding.CodecV4, chainConfig, db, nil)
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
cp.TryProposeChunk()
|
||||
}
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var expectedNumChunks int = 2
|
||||
var numBlocksMultiplier uint64
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
numBlocksMultiplier = 255
|
||||
} else {
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
assert.Len(t, chunks, expectedNumChunks)
|
||||
|
||||
for i, chunk := range chunks {
|
||||
expected := numBlocksMultiplier * (uint64(i) + 1)
|
||||
if expected > 2000 {
|
||||
expected = 2000
|
||||
}
|
||||
assert.Equal(t, expected, chunk.EndBlockNumber)
|
||||
}
|
||||
database.CloseDB(db)
|
||||
}
|
||||
}
|
||||
@@ -1,101 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
|
||||
db := setupDB(t)
|
||||
client, err := testApps.GetPoSL1Client()
|
||||
assert.NoError(t, err)
|
||||
l1Cfg := cfg.L1Config
|
||||
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, db, nil)
|
||||
return watcher, db
|
||||
}
|
||||
|
||||
// testL1WatcherClientFetchBlockHeader exercises FetchBlockHeader across four
// scenarios: a no-op range (toBlock below the processed height), an RPC
// failure, a DB insert failure, and the success path. The RPC and ORM calls
// are stubbed with gomonkey method patches.
func testL1WatcherClientFetchBlockHeader(t *testing.T) {
	watcher, db := setupL1Watcher(t)
	defer database.CloseDB(db)
	convey.Convey("test toBlock < fromBlock", t, func() {
		var blockHeight uint64
		if watcher.ProcessedBlockHeight() <= 0 {
			blockHeight = 0
		} else {
			blockHeight = watcher.ProcessedBlockHeight() - 1
		}
		err := watcher.FetchBlockHeader(blockHeight)
		assert.NoError(t, err)
	})

	convey.Convey("test get header from client error", t, func() {
		// NOTE(review): gomonkey patches HeaderByNumber at the type level, so
		// the nil *ethclient.Client receiver is only used to name the method —
		// the watcher's own client is affected too. Verify against gomonkey docs.
		var c *ethclient.Client
		patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
			return nil, ethereum.NotFound
		})
		defer patchGuard.Reset()

		var blockHeight uint64 = 10
		err := watcher.FetchBlockHeader(blockHeight)
		assert.Error(t, err)
	})

	// Nil ORM pointer used only as a gomonkey patch target below.
	var l1BlockOrm *orm.L1Block
	convey.Convey("insert l1 block error", t, func() {
		var c *ethclient.Client
		patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
			if height == nil {
				height = big.NewInt(100)
			}
			t.Log(height)
			return &types.Header{
				BaseFee: big.NewInt(100),
			}, nil
		})
		defer patchGuard.Reset()

		// Header fetch succeeds, but persisting the block fails.
		patchGuard.ApplyMethodFunc(l1BlockOrm, "InsertL1Blocks", func(ctx context.Context, blocks []orm.L1Block) error {
			return errors.New("insert failed")
		})

		var blockHeight uint64 = 10
		err := watcher.FetchBlockHeader(blockHeight)
		assert.Error(t, err)
	})

	convey.Convey("fetch block header success", t, func() {
		var c *ethclient.Client
		patchGuard := gomonkey.ApplyMethodFunc(c, "HeaderByNumber", func(ctx context.Context, height *big.Int) (*types.Header, error) {
			if height == nil {
				height = big.NewInt(100)
			}
			t.Log(height)
			return &types.Header{
				BaseFee: big.NewInt(100),
			}, nil
		})
		defer patchGuard.Reset()

		// Both the header fetch and the DB insert succeed.
		patchGuard.ApplyMethodFunc(l1BlockOrm, "InsertL1Blocks", func(ctx context.Context, blocks []orm.L1Block) error {
			return nil
		})

		var blockHeight uint64 = 10
		err := watcher.FetchBlockHeader(blockHeight)
		assert.NoError(t, err)
	})
}
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
// L2WatcherClient provide APIs which support others to subscribe to various event from l2geth
|
||||
@@ -26,6 +27,7 @@ type L2WatcherClient struct {
|
||||
event.Feed
|
||||
|
||||
*ethclient.Client
|
||||
l2RpcClient *rpc.Client
|
||||
|
||||
l2BlockOrm *orm.L2Block
|
||||
|
||||
@@ -40,10 +42,11 @@ type L2WatcherClient struct {
|
||||
}
|
||||
|
||||
// NewL2WatcherClient take a l2geth instance to generate a l2watcherclient instance
|
||||
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
|
||||
func NewL2WatcherClient(ctx context.Context, client *rpc.Client, confirmations rpc.BlockNumber, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
|
||||
return &L2WatcherClient{
|
||||
ctx: ctx,
|
||||
Client: client,
|
||||
ctx: ctx,
|
||||
Client: ethclient.NewClient(client),
|
||||
l2RpcClient: client,
|
||||
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
|
||||
@@ -141,6 +144,13 @@ func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64
|
||||
if err3 != nil {
|
||||
return fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
|
||||
}
|
||||
|
||||
header := block.Header()
|
||||
header.Root, err = utils.GetDiskRoot(w.ctx, w.l2RpcClient, block.Number().Uint64())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get disk root, block number: %v", block.Number().Uint64())
|
||||
}
|
||||
|
||||
blocks = append(blocks, &encoding.Block{
|
||||
Header: block.Header(),
|
||||
Transactions: txsToTxsData(block.Transactions()),
|
||||
|
||||
@@ -1,48 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
cutils "scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
|
||||
db := setupDB(t)
|
||||
l2cfg := cfg.L2Config
|
||||
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, nil, db, nil)
|
||||
return watcher, db
|
||||
}
|
||||
|
||||
func testFetchRunningMissingBlocks(t *testing.T) {
|
||||
_, db := setupL2Watcher(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
ok := cutils.TryTimes(10, func() bool {
|
||||
latestHeight, err := l2Cli.BlockNumber(context.Background())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
wc := prepareWatcherClient(l2Cli, db)
|
||||
wc.TryFetchRunningMissingBlocks(latestHeight)
|
||||
fetchedHeight, err := l2BlockOrm.GetL2BlocksLatestHeight(context.Background())
|
||||
return err == nil && fetchedHeight == latestHeight
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB) *L2WatcherClient {
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, common.Address{}, common.Hash{}, nil, db, nil)
|
||||
}
|
||||
@@ -1,123 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/testcontainers"
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
)
|
||||
|
||||
var (
|
||||
// config
|
||||
cfg *config.Config
|
||||
|
||||
testApps *testcontainers.TestcontainerApps
|
||||
|
||||
// l2geth client
|
||||
l2Cli *ethclient.Client
|
||||
|
||||
// block trace
|
||||
block1 *encoding.Block
|
||||
block2 *encoding.Block
|
||||
)
|
||||
|
||||
// setupEnv boots the shared test infrastructure (postgres, PoS L1 and l2geth
// containers), loads the rollup config, points it at the containers, and
// preloads the two block-trace fixtures used across the watcher tests.
// It populates the package-level cfg, testApps, l2Cli, block1 and block2.
func setupEnv(t *testing.T) (err error) {
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	// Load config.
	cfg, err = config.NewConfig("../../../conf/config.json")
	assert.NoError(t, err)

	testApps = testcontainers.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())
	assert.NoError(t, testApps.StartPoSL1Container())
	assert.NoError(t, testApps.StartL2GethContainer())

	// NOTE(review): the endpoints are deliberately cross-wired — the L2 sender
	// gets the L1 endpoint and vice versa. Presumably the L2 relayer submits
	// to L1 and the L1 gas-oracle relayer submits to L2; verify against the
	// relayer configuration before changing.
	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetPoSL1EndPoint()
	assert.NoError(t, err)
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetL2GethEndPoint()
	assert.NoError(t, err)

	dsn, err := testApps.GetDBEndPoint()
	assert.NoError(t, err)
	cfg.DBConfig = &database.Config{
		DSN:        dsn,
		DriverName: "postgres",
		MaxOpenNum: 200,
		MaxIdleNum: 20,
	}

	// Create l2geth client.
	l2Cli, err = testApps.GetL2GethClient()
	assert.NoError(t, err)

	block1 = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
	block2 = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")

	return err
}
|
||||
|
||||
func setupDB(t *testing.T) *gorm.DB {
|
||||
db, err := database.InitDB(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
return db
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
defer func() {
|
||||
if testApps != nil {
|
||||
testApps.Free()
|
||||
}
|
||||
}()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
// TestFunction boots the shared environment once and then runs every watcher,
// chunk/batch proposer, and bundle proposer test case in sequence; the
// subtests share the containers started by setupEnv.
func TestFunction(t *testing.T) {
	if err := setupEnv(t); err != nil {
		t.Fatal(err)
	}

	// Run l1 watcher test cases.
	t.Run("TestL1WatcherClientFetchBlockHeader", testL1WatcherClientFetchBlockHeader)

	// Run l2 watcher test cases.
	t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)

	// Run chunk proposer test cases.
	t.Run("TestChunkProposerLimitsCodecV4", testChunkProposerLimitsCodecV4)
	t.Run("TestChunkProposerBlobSizeLimitCodecV4", testChunkProposerBlobSizeLimitCodecV4)

	// Run batch proposer test cases.
	t.Run("TestBatchProposerLimitsCodecV4", testBatchProposerLimitsCodecV4)
	t.Run("TestBatchCommitGasAndCalldataSizeEstimationCodecV4", testBatchCommitGasAndCalldataSizeEstimationCodecV4)
	t.Run("TestBatchProposerBlobSizeLimitCodecV4", testBatchProposerBlobSizeLimitCodecV4)
	t.Run("TestBatchProposerMaxChunkNumPerBatchLimitCodecV4", testBatchProposerMaxChunkNumPerBatchLimitCodecV4)

	// Run bundle proposer test cases.
	t.Run("TestBundleProposerLimitsCodecV4", testBundleProposerLimitsCodecV4)
}
|
||||
|
||||
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
|
||||
data, err := os.ReadFile(filename)
|
||||
assert.NoError(t, err)
|
||||
|
||||
block := &encoding.Block{}
|
||||
assert.NoError(t, json.Unmarshal(data, block))
|
||||
return block
|
||||
}
|
||||
@@ -1,11 +1,15 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
// ChunkMetrics indicates the metrics for proposing a chunk.
|
||||
@@ -221,3 +225,13 @@ func measureTime(operation func() error) (time.Duration, error) {
|
||||
err := operation()
|
||||
return time.Since(start), err
|
||||
}
|
||||
|
||||
// GetDiskRoot retrieves the disk root for a given block number from the Ethereum node.
|
||||
func GetDiskRoot(ctx context.Context, cli *rpc.Client, blockNumber uint64) (common.Hash, error) {
|
||||
var diskRoot common.Hash
|
||||
err := cli.CallContext(ctx, &diskRoot, "scroll_diskRoot", hexutil.EncodeBig(big.NewInt(0).SetUint64(blockNumber)))
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to fetch disk root for block %d: %w", blockNumber, err)
|
||||
}
|
||||
return diskRoot, nil
|
||||
}
|
||||
|
||||
@@ -1,217 +0,0 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
tc "scroll-tech/common/testcontainers"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
bcmd "scroll-tech/rollup/cmd"
|
||||
"scroll-tech/rollup/mock_bridge"
|
||||
)
|
||||
|
||||
var (
|
||||
testApps *tc.TestcontainerApps
|
||||
rollupApp *bcmd.MockApp
|
||||
|
||||
// clients
|
||||
l1Client *ethclient.Client
|
||||
l2Client *ethclient.Client
|
||||
|
||||
l1Auth *bind.TransactOpts
|
||||
l2Auth *bind.TransactOpts
|
||||
)
|
||||
|
||||
func setupDB(t *testing.T) *gorm.DB {
|
||||
dsn, err := testApps.GetDBEndPoint()
|
||||
assert.NoError(t, err)
|
||||
|
||||
cfg := &database.Config{
|
||||
DSN: dsn,
|
||||
DriverName: "postgres",
|
||||
MaxOpenNum: 200,
|
||||
MaxIdleNum: 20,
|
||||
}
|
||||
db, err := database.InitDB(cfg)
|
||||
assert.NoError(t, err)
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
return db
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
defer func() {
|
||||
if testApps != nil {
|
||||
testApps.Free()
|
||||
}
|
||||
if rollupApp != nil {
|
||||
rollupApp.Free()
|
||||
}
|
||||
}()
|
||||
m.Run()
|
||||
}
|
||||
|
||||
// setupEnv starts the shared containers (postgres, l2geth, PoS L1), builds
// the mock rollup app, creates both chain clients, derives the transactors
// used to deploy the mock contracts, and picks a random local port for the
// mock chain-monitor server. It populates the package-level testApps,
// rollupApp, l1Client, l2Client, l1Auth and l2Auth.
func setupEnv(t *testing.T) {
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	var (
		err           error
		l1GethChainID *big.Int
		l2GethChainID *big.Int
	)

	testApps = tc.NewTestcontainerApps()
	assert.NoError(t, testApps.StartPostgresContainer())
	assert.NoError(t, testApps.StartL2GethContainer())
	assert.NoError(t, testApps.StartPoSL1Container())
	rollupApp = bcmd.NewRollupApp(testApps, "../conf/config.json")

	l1Client, err = testApps.GetPoSL1Client()
	assert.NoError(t, err)
	l2Client, err = testApps.GetL2GethClient()
	assert.NoError(t, err)
	l1GethChainID, err = l1Client.ChainID(context.Background())
	assert.NoError(t, err)
	l2GethChainID, err = l2Client.ChainID(context.Background())
	assert.NoError(t, err)

	l1Cfg, l2Cfg := rollupApp.Config.L1Config, rollupApp.Config.L2Config
	// Zero confirmations so the tests observe transactions immediately.
	l1Cfg.RelayerConfig.SenderConfig.Confirmations = 0
	l2Cfg.Confirmations = 0
	l2Cfg.RelayerConfig.SenderConfig.Confirmations = 0

	// Both transactors are derived from signer keys in the L2 relayer config.
	pKey, err := crypto.ToECDSA(common.FromHex(l2Cfg.RelayerConfig.CommitSenderSignerConfig.PrivateKeySignerConfig.PrivateKey))
	assert.NoError(t, err)
	l1Auth, err = bind.NewKeyedTransactorWithChainID(pKey, l1GethChainID)
	assert.NoError(t, err)

	pKey, err = crypto.ToECDSA(common.FromHex(l2Cfg.RelayerConfig.GasOracleSenderSignerConfig.PrivateKeySignerConfig.PrivateKey))
	assert.NoError(t, err)
	l2Auth, err = bind.NewKeyedTransactorWithChainID(pKey, l2GethChainID)
	assert.NoError(t, err)

	// Random port in [40000, 50000) for the mock chain-monitor HTTP server.
	port, err := rand.Int(rand.Reader, big.NewInt(10000))
	assert.NoError(t, err)
	svrPort := strconv.FormatInt(port.Int64()+40000, 10)
	l2Cfg.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort
}
|
||||
|
||||
func mockChainMonitorServer(baseURL string) (*http.Server, error) {
|
||||
router := gin.New()
|
||||
r := router.Group("/v1")
|
||||
r.GET("/batch_status", func(ctx *gin.Context) {
|
||||
ctx.JSON(http.StatusOK, struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data bool `json:"data"`
|
||||
}{
|
||||
ErrCode: 0,
|
||||
ErrMsg: "",
|
||||
Data: true,
|
||||
})
|
||||
})
|
||||
return utils.StartHTTPServer(strings.Split(baseURL, "//")[1], router)
|
||||
}
|
||||
|
||||
// prepareContracts deploys the MockBridge contract to both chains, waits until
// each deployment is mined with a successful receipt and visible code, then
// records the resulting addresses in the rollup and gas-oracle configs.
func prepareContracts(t *testing.T) {
	// L1 ScrollChain contract
	nonce, err := l1Client.PendingNonceAt(context.Background(), l1Auth.From)
	assert.NoError(t, err)
	// The deployment address is derived from (sender, nonce) ahead of time.
	mockL1ContractAddress := crypto.CreateAddress(l1Auth.From, nonce)
	tx := types.NewContractCreation(nonce, big.NewInt(0), 10000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
	signedTx, err := l1Auth.Signer(l1Auth.From, tx)
	assert.NoError(t, err)
	err = l1Client.SendTransaction(context.Background(), signedTx)
	assert.NoError(t, err)

	// Wait (up to 30s) for the deployment tx to leave the pending pool...
	assert.Eventually(t, func() bool {
		_, isPending, getErr := l1Client.TransactionByHash(context.Background(), signedTx.Hash())
		return getErr == nil && !isPending
	}, 30*time.Second, time.Second)

	// ...for its receipt to report success...
	assert.Eventually(t, func() bool {
		receipt, getErr := l1Client.TransactionReceipt(context.Background(), signedTx.Hash())
		return getErr == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
	}, 30*time.Second, time.Second)

	// ...and for the contract code to be visible at the predicted address.
	assert.Eventually(t, func() bool {
		code, getErr := l1Client.CodeAt(context.Background(), mockL1ContractAddress, nil)
		return getErr == nil && len(code) > 0
	}, 30*time.Second, time.Second)

	// L2 ScrollChain contract: same deploy-and-wait sequence on the L2 chain.
	nonce, err = l2Client.PendingNonceAt(context.Background(), l2Auth.From)
	assert.NoError(t, err)
	mockL2ContractAddress := crypto.CreateAddress(l2Auth.From, nonce)
	tx = types.NewContractCreation(nonce, big.NewInt(0), 2000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
	signedTx, err = l2Auth.Signer(l2Auth.From, tx)
	assert.NoError(t, err)
	err = l2Client.SendTransaction(context.Background(), signedTx)
	assert.NoError(t, err)

	assert.Eventually(t, func() bool {
		_, isPending, err := l2Client.TransactionByHash(context.Background(), signedTx.Hash())
		return err == nil && !isPending
	}, 30*time.Second, time.Second)

	assert.Eventually(t, func() bool {
		receipt, err := l2Client.TransactionReceipt(context.Background(), signedTx.Hash())
		return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
	}, 30*time.Second, time.Second)

	assert.Eventually(t, func() bool {
		code, err := l2Client.CodeAt(context.Background(), mockL2ContractAddress, nil)
		return err == nil && len(code) > 0
	}, 30*time.Second, time.Second)

	// Point the relayer configs at the freshly deployed mock contracts.
	l1Config, l2Config := rollupApp.Config.L1Config, rollupApp.Config.L2Config
	l2Config.RelayerConfig.RollupContractAddress = mockL1ContractAddress

	l2Config.RelayerConfig.GasPriceOracleContractAddress = mockL1ContractAddress
	l1Config.RelayerConfig.GasPriceOracleContractAddress = mockL2ContractAddress
}
|
||||
|
||||
func TestFunction(t *testing.T) {
|
||||
setupEnv(t)
|
||||
srv, err := mockChainMonitorServer(rollupApp.Config.L2Config.RelayerConfig.ChainMonitor.BaseURL)
|
||||
assert.NoError(t, err)
|
||||
defer srv.Close()
|
||||
|
||||
// process start test
|
||||
t.Run("TestProcessStart", testProcessStart)
|
||||
t.Run("TestProcessStartEnableMetrics", testProcessStartEnableMetrics)
|
||||
|
||||
// l1 rollup and watch rollup events
|
||||
t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch)
|
||||
t.Run("testCommitBatchAndFinalizeBundleCodecV4V5V6", testCommitBatchAndFinalizeBundleCodecV4V5V6)
|
||||
|
||||
// l1/l2 gas oracle
|
||||
t.Run("TestImportL1GasPrice", testImportL1GasPrice)
|
||||
t.Run("TestImportDefaultL1GasPriceDueToL1GasPriceSpike", testImportDefaultL1GasPriceDueToL1GasPriceSpike)
|
||||
t.Run("TestImportL2GasPrice", testImportL2GasPrice)
|
||||
}
|
||||
@@ -1,256 +0,0 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/rollup/internal/controller/relayer"
|
||||
"scroll-tech/rollup/internal/controller/watcher"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
// testImportL1GasPrice checks that the L1 watcher persists a fetched block
// header as gas-oracle-pending and that the L1 relayer then imports its gas
// price, flipping the status to importing with an oracle tx hash recorded.
func testImportL1GasPrice(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)

	prepareContracts(t)

	l1Cfg := rollupApp.Config.L1Config

	// Create L1Relayer
	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, relayer.ServiceTypeL1GasOracle, nil)
	assert.NoError(t, err)
	defer l1Relayer.StopSenders()

	// Create L1Watcher starting one block behind the current head.
	startHeight, err := l1Client.BlockNumber(context.Background())
	assert.NoError(t, err)
	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, db, nil)

	// fetch new blocks
	number, err := l1Client.BlockNumber(context.Background())
	assert.Greater(t, number, startHeight-1)
	assert.NoError(t, err)
	err = l1Watcher.FetchBlockHeader(number)
	assert.NoError(t, err)

	l1BlockOrm := orm.NewL1Block(db)
	// check db status: the fetched block is stored with no oracle tx yet.
	latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
	assert.NoError(t, err)
	assert.Equal(t, number, latestBlockHeight)
	blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
	assert.NoError(t, err)
	assert.Equal(t, len(blocks), 1)
	assert.Empty(t, blocks[0].OracleTxHash)
	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)

	// add fake batch to pass check for commit batch timeout
	chunk := &encoding.Chunk{
		Blocks: []*encoding.Block{
			{
				Header: &gethTypes.Header{
					Number:     big.NewInt(1),
					ParentHash: common.Hash{},
					Difficulty: big.NewInt(0),
					BaseFee:    big.NewInt(0),
				},
				Transactions:   nil,
				WithdrawRoot:   common.Hash{},
				RowConsumption: &gethTypes.RowConsumption{},
			},
		},
	}
	batch := &encoding.Batch{
		Index:                      0,
		TotalL1MessagePoppedBefore: 0,
		ParentBatchHash:            common.Hash{},
		Chunks:                     []*encoding.Chunk{chunk},
	}
	batchOrm := orm.NewBatch(db)
	dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
	assert.NoError(t, err)
	err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted)
	assert.NoError(t, err)

	// relay gas price and confirm the block row now carries an oracle tx hash
	// and the importing status.
	l1Relayer.ProcessGasPriceOracle()
	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
	assert.NoError(t, err)
	assert.Equal(t, len(blocks), 1)
	assert.NotEmpty(t, blocks[0].OracleTxHash)
	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOracleImporting)
}
|
||||
|
||||
// testImportDefaultL1GasPriceDueToL1GasPriceSpike checks the gas-oracle fallback:
// with the committed-batches window forced to zero, the relayer relays a default
// gas price for the first fetched L1 block, and does NOT relay again for a
// subsequently fetched block (the default value is already set).
func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)

	prepareContracts(t)

	l1Cfg := rollupApp.Config.L1Config
	// Work on a copy so the shared app config is not mutated for other tests.
	l1CfgCopy := *l1Cfg
	// set CheckCommittedBatchesWindowMinutes to zero to not pass check for commit batch timeout
	l1CfgCopy.RelayerConfig.GasOracleConfig.CheckCommittedBatchesWindowMinutes = 0
	// Create L1Relayer
	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1CfgCopy.RelayerConfig, relayer.ServiceTypeL1GasOracle, nil)
	assert.NoError(t, err)
	defer l1Relayer.StopSenders()

	// Create L1Watcher, starting two blocks behind the current head so two
	// separate headers (number-1 and number) can be fetched below.
	startHeight, err := l1Client.BlockNumber(context.Background())
	assert.NoError(t, err)
	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-2, db, nil)

	// fetch the second-to-latest block header
	number, err := l1Client.BlockNumber(context.Background())
	assert.Greater(t, number-1, startHeight-2)
	assert.NoError(t, err)
	err = l1Watcher.FetchBlockHeader(number - 1)
	assert.NoError(t, err)

	l1BlockOrm := orm.NewL1Block(db)
	// check db status: the fetched header is stored with no oracle tx yet
	latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
	assert.NoError(t, err)
	assert.Equal(t, number-1, latestBlockHeight)
	blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
	assert.NoError(t, err)
	assert.Equal(t, len(blocks), 1)
	assert.Empty(t, blocks[0].OracleTxHash)
	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)

	// add fake batches; batch (index 1) is marked committed below so the
	// relayer has a committed batch whose age it can evaluate
	chunk := &encoding.Chunk{
		Blocks: []*encoding.Block{
			{
				Header: &gethTypes.Header{
					Number:     big.NewInt(1),
					ParentHash: common.Hash{},
					Difficulty: big.NewInt(0),
					BaseFee:    big.NewInt(0),
				},
				Transactions:   nil,
				WithdrawRoot:   common.Hash{},
				RowConsumption: &gethTypes.RowConsumption{},
			},
		},
	}
	batch0 := &encoding.Batch{
		Index:                      0,
		TotalL1MessagePoppedBefore: 0,
		ParentBatchHash:            common.Hash{},
		Chunks:                     []*encoding.Chunk{chunk},
	}
	batch := &encoding.Batch{
		Index:                      1,
		TotalL1MessagePoppedBefore: 0,
		ParentBatchHash:            common.Hash{},
		Chunks:                     []*encoding.Chunk{chunk},
	}
	batchOrm := orm.NewBatch(db)
	_, err = batchOrm.InsertBatch(context.Background(), batch0, encoding.CodecV0, utils.BatchMetrics{})
	assert.NoError(t, err)
	dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
	assert.NoError(t, err)
	err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted)
	assert.NoError(t, err)
	// give the committed batch a non-zero age relative to the zero-minute window
	time.Sleep(1 * time.Second)

	// relay gas price
	// gas price will be relayed as some default value because no batch was
	// committed within CheckCommittedBatchesWindowMinutes (= 0) minutes
	l1Relayer.ProcessGasPriceOracle()
	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
	assert.NoError(t, err)
	assert.Equal(t, len(blocks), 1)
	assert.NotEmpty(t, blocks[0].OracleTxHash)
	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOracleImporting)

	// fetch the latest block header
	err = l1Watcher.FetchBlockHeader(number)
	assert.NoError(t, err)

	// check db status for the newly fetched header (still pending, no oracle tx)
	latestBlockHeight, err = l1BlockOrm.GetLatestL1BlockHeight(context.Background())
	assert.NoError(t, err)
	assert.Equal(t, number, latestBlockHeight)
	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
	assert.NoError(t, err)
	assert.Equal(t, len(blocks), 1)
	assert.Empty(t, blocks[0].OracleTxHash)
	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)

	// relay gas price again
	// gas price should not be relayed one more time because we already set it
	// to the default value above, so the new block must stay pending
	l1Relayer.ProcessGasPriceOracle()
	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
	assert.NoError(t, err)
	assert.Equal(t, len(blocks), 1)
	assert.Empty(t, blocks[0].OracleTxHash)
	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)
}
|
||||
|
||||
func testImportL2GasPrice(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
prepareContracts(t)
|
||||
|
||||
l2Cfg := rollupApp.Config.L2Config
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, false, relayer.ServiceTypeL2GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
// add fake chunk
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{
|
||||
{
|
||||
Header: &gethTypes.Header{
|
||||
Number: big.NewInt(1),
|
||||
ParentHash: common.Hash{},
|
||||
Difficulty: big.NewInt(0),
|
||||
BaseFee: big.NewInt(0),
|
||||
},
|
||||
Transactions: nil,
|
||||
WithdrawRoot: common.Hash{},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
},
|
||||
},
|
||||
}
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// check db status
|
||||
dbBatch, err := batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batch)
|
||||
assert.Empty(t, dbBatch.OracleTxHash)
|
||||
assert.Equal(t, types.GasOracleStatus(dbBatch.OracleStatus), types.GasOraclePending)
|
||||
|
||||
// relay gas price
|
||||
l2Relayer.ProcessGasPriceOracle()
|
||||
dbBatch, err = batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batch)
|
||||
assert.NotEmpty(t, dbBatch.OracleTxHash)
|
||||
assert.Equal(t, types.GasOracleStatus(dbBatch.OracleStatus), types.GasOracleImporting)
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
_ "scroll-tech/rollup/cmd/gas_oracle/app"
|
||||
_ "scroll-tech/rollup/cmd/rollup_relayer/app"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
cutils "scroll-tech/common/utils"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func testProcessStart(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
rollupApp.RunApp(t, cutils.GasOracleApp, "--genesis", "../conf/genesis.json")
|
||||
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--genesis", "../conf/genesis.json", "--min-codec-version", "4")
|
||||
|
||||
rollupApp.WaitExit()
|
||||
}
|
||||
|
||||
func testProcessStartEnableMetrics(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
port, err := rand.Int(rand.Reader, big.NewInt(10000))
|
||||
assert.NoError(t, err)
|
||||
svrPort := strconv.FormatInt(port.Int64()+20000, 10)
|
||||
rollupApp.RunApp(t, cutils.GasOracleApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort, "--genesis", "../conf/genesis.json")
|
||||
|
||||
port, err = rand.Int(rand.Reader, big.NewInt(10000))
|
||||
assert.NoError(t, err)
|
||||
svrPort = strconv.FormatInt(port.Int64()+30000, 10)
|
||||
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort, "--genesis", "../conf/genesis.json", "--min-codec-version", "4")
|
||||
|
||||
rollupApp.WaitExit()
|
||||
}
|
||||
@@ -1,213 +0,0 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"math/big"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/controller/relayer"
|
||||
"scroll-tech/rollup/internal/controller/watcher"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
func testCommitAndFinalizeGenesisBatch(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
prepareContracts(t)
|
||||
|
||||
l2Cfg := rollupApp.Config.L2Config
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, l2Relayer)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
genesisChunkHash := common.HexToHash("0x00e076380b00a3749816fcc9a2a576b529952ef463222a54544d21b7d434dfe1")
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
dbChunk, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, dbChunk, 1)
|
||||
assert.Equal(t, genesisChunkHash.String(), dbChunk[0].Hash)
|
||||
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(dbChunk[0].ProvingStatus))
|
||||
|
||||
genesisBatchHash := common.HexToHash("0x2d214b024f5337d83a5681f88575ab225f345ec2e4e3ce53cf4dc4b0cb5c96b1")
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batch, err := batchOrm.GetBatchByIndex(context.Background(), 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, genesisBatchHash.String(), batch.Hash)
|
||||
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(batch.ProvingStatus))
|
||||
assert.Equal(t, types.RollupFinalized, types.RollupStatus(batch.RollupStatus))
|
||||
}
|
||||
|
||||
// testCommitBatchAndFinalizeBundleCodecV4V5V6 is an end-to-end flow test:
// propose chunks/batches/a bundle from ten fake L2 blocks, commit the batches
// on L1, attach fake proofs, then finalize the bundle and verify every batch
// and the bundle reach RollupFinalized with successful L1 receipts.
func testCommitBatchAndFinalizeBundleCodecV4V5V6(t *testing.T) {
	db := setupDB(t)

	prepareContracts(t)

	// All earlier forks active from genesis; Euclid activates at block time 3,
	// so the ten blocks below (times 0..9) straddle the Euclid boundary.
	euclidTime := uint64(3)
	chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: &euclidTime}

	// Create L2Relayer
	l2Cfg := rollupApp.Config.L2Config
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
	assert.NoError(t, err)

	// add some blocks to db (numbers 1..10, timestamps 0..9)
	var blocks []*encoding.Block
	for i := int64(0); i < 10; i++ {
		header := gethTypes.Header{
			Number:     big.NewInt(i + 1),
			ParentHash: common.Hash{},
			Difficulty: big.NewInt(0),
			BaseFee:    big.NewInt(0),
			Root:       common.HexToHash("0x1"),
			Time:       uint64(i),
		}
		blocks = append(blocks, &encoding.Block{
			Header:         &header,
			Transactions:   nil,
			WithdrawRoot:   common.HexToHash("0x2"),
			RowConsumption: &gethTypes.RowConsumption{},
		})
	}

	// Chunk proposer with limits set high enough that timeouts/size never
	// split the test blocks unexpectedly.
	cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
		MaxBlockNumPerChunk:             100,
		MaxTxNumPerChunk:                10000,
		MaxL1CommitGasPerChunk:          50000000000,
		MaxL1CommitCalldataSizePerChunk: 1000000,
		MaxRowConsumptionPerChunk:       1048319,
		ChunkTimeoutSec:                 300,
		MaxUncompressedBatchBytesSize:   math.MaxUint64,
	}, encoding.CodecV4, chainConfig, db, nil)

	bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		MaxL1CommitGasPerBatch:          50000000000,
		MaxL1CommitCalldataSizePerBatch: 1000000,
		BatchTimeoutSec:                 300,
		MaxUncompressedBatchBytesSize:   math.MaxUint64,
		MaxChunksPerBatch:               math.MaxInt32,
	}, encoding.CodecV4, chainConfig, db, nil)

	bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
		MaxBatchNumPerBundle: 1000000,
		BundleTimeoutSec:     300,
	}, encoding.CodecV4, chainConfig, db, nil)

	// Insert blocks in two tranches and propose after each, yielding two
	// chunks and two batches (plus the genesis batch at index 0).
	l2BlockOrm := orm.NewL2Block(db)
	err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[:5])
	assert.NoError(t, err)

	cp.TryProposeChunk()
	bap.TryProposeBatch()

	err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[5:])
	assert.NoError(t, err)

	cp.TryProposeChunk()
	bap.TryProposeBatch()

	bup.TryProposeBundle() // The proposed bundle contains two batches when codec version is codecv3.

	l2Relayer.ProcessPendingBatches()

	batchOrm := orm.NewBatch(db)
	bundleOrm := orm.NewBundle(db)

	// Wait until both non-genesis batches are committed on L1.
	assert.Eventually(t, func() bool {
		batches, getErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
		assert.NoError(t, getErr)
		assert.Len(t, batches, 3)
		// skip the genesis batch
		batches = batches[1:]
		for _, batch := range batches {
			if types.RollupCommitted != types.RollupStatus(batch.RollupStatus) {
				return false
			}
		}
		return true
	}, 30*time.Second, time.Second)

	// Attach fake 32-byte proofs and mark the batches verified so
	// finalization can proceed.
	batchProof := &message.Halo2BatchProof{
		RawProof:  []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		Vk:        []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
	}
	batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
	assert.NoError(t, err)
	batches = batches[1:]
	for _, batch := range batches {
		err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, batchProof, 100)
		assert.NoError(t, err)
		err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
		assert.NoError(t, err)
	}

	// Same for the bundle: fake proof + verified status.
	bundleProof := &message.Halo2BundleProof{
		RawProof:  []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
		Vk:        []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
	}
	bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
	assert.NoError(t, err)
	for _, bundle := range bundles {
		err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, bundleProof, types.ProvingTaskVerified, 100)
		assert.NoError(t, err)
	}

	// Repeatedly drive bundle finalization until batches and the bundle are
	// all RollupFinalized with successful receipts, and the batches are
	// linked to the bundle's finalize tx.
	assert.Eventually(t, func() bool {
		l2Relayer.ProcessPendingBundles()

		batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
		assert.NoError(t, err)
		assert.Len(t, batches, 3)
		batches = batches[1:]
		for _, batch := range batches {
			if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized {
				return false
			}

			assert.NotEmpty(t, batch.FinalizeTxHash)
			receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
			assert.NoError(t, getErr)
			assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
		}

		bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
		assert.NoError(t, err)
		assert.Len(t, bundles, 1)

		bundle := bundles[0]
		if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized {
			return false
		}
		assert.NotEmpty(t, bundle.FinalizeTxHash)
		receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash))
		assert.NoError(t, err)
		assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
		batches, err = batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0)
		assert.NoError(t, err)
		assert.Len(t, batches, 2)
		for _, batch := range batches {
			assert.Equal(t, batch.RollupStatus, bundle.RollupStatus)
			assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash)
		}

		return true
	}, 30*time.Second, time.Second)

	// NOTE(review): cleanup is inline rather than deferred here, so a failed
	// assertion above leaks the senders/DB handle — confirm this is intentional.
	l2Relayer.StopSenders()
	database.CloseDB(db)
}
|
||||
Reference in New Issue
Block a user