mirror of
https://github.com/scroll-tech/scroll.git
synced 2026-01-11 23:18:07 -05:00
Compare commits
18 Commits
feat-deter
...
v1.2.3
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
86425774c3 | ||
|
|
c40c2a7dc8 | ||
|
|
e5226ae5f4 | ||
|
|
329047b782 | ||
|
|
5fcc831f51 | ||
|
|
b9168fe4b2 | ||
|
|
eb74b18354 | ||
|
|
f911e4533a | ||
|
|
9038ea6b97 | ||
|
|
559a17405c | ||
|
|
eceeddaa50 | ||
|
|
c831e1dec1 | ||
|
|
e59e0e65ce | ||
|
|
10ad42cf4d | ||
|
|
7fea64a346 | ||
|
|
04d6e0afc6 | ||
|
|
c3af1cd79d | ||
|
|
3b42875ea4 |
@@ -19,7 +19,7 @@ CAPELLA_FORK_VERSION: 0x20000092
|
||||
MAX_WITHDRAWALS_PER_PAYLOAD: 16
|
||||
|
||||
# Deneb
|
||||
DENEB_FORK_EPOCH: 1
|
||||
DENEB_FORK_EPOCH: 0
|
||||
DENEB_FORK_VERSION: 0x20000093
|
||||
|
||||
# Time parameters
|
||||
|
||||
@@ -19,7 +19,7 @@ services:
|
||||
command:
|
||||
- testnet
|
||||
- generate-genesis
|
||||
- --fork=capella
|
||||
- --fork=deneb
|
||||
- --num-validators=64
|
||||
- --genesis-time-delay=3
|
||||
- --output-ssz=/data/consensus/genesis.ssz
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
"archimedesBlock": 0,
|
||||
"shanghaiBlock": 0,
|
||||
"clique": {
|
||||
"period": 3,
|
||||
"period": 1,
|
||||
"epoch": 30000
|
||||
},
|
||||
"scroll": {
|
||||
|
||||
@@ -26,6 +26,7 @@ func CollectSortedForkHeights(config *params.ChainConfig) ([]uint64, map[uint64]
|
||||
config.ArrowGlacierBlock,
|
||||
config.ArchimedesBlock,
|
||||
config.ShanghaiBlock,
|
||||
config.BanachBlock,
|
||||
} {
|
||||
if fork == nil {
|
||||
continue
|
||||
|
||||
@@ -442,9 +442,9 @@ func EstimateBatchL1CommitGas(b *encoding.Batch) (uint64, error) {
|
||||
}
|
||||
|
||||
// EstimateBatchL1CommitCalldataSize calculates the calldata size in l1 commit for this batch approximately.
|
||||
func EstimateBatchL1CommitCalldataSize(c *encoding.Batch) (uint64, error) {
|
||||
func EstimateBatchL1CommitCalldataSize(b *encoding.Batch) (uint64, error) {
|
||||
var totalL1CommitCalldataSize uint64
|
||||
for _, chunk := range c.Chunks {
|
||||
for _, chunk := range b.Chunks {
|
||||
chunkL1CommitCalldataSize, err := EstimateChunkL1CommitCalldataSize(chunk)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
||||
@@ -443,8 +443,55 @@ func (b *DABatch) BlobDataProof() ([]byte, error) {
|
||||
return BlobDataProofArgs.Pack(values...)
|
||||
}
|
||||
|
||||
// Blob returns the blob of the batch.
|
||||
func (b *DABatch) Blob() *kzg4844.Blob {
|
||||
return b.blob
|
||||
}
|
||||
|
||||
// DecodeFromCalldata attempts to decode a DABatch and an array of DAChunks from the provided calldata byte slice.
|
||||
func DecodeFromCalldata(data []byte) (*DABatch, []*DAChunk, error) {
|
||||
// TODO: implement this function.
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
// EstimateChunkL1CommitBlobSize estimates the size of the L1 commit blob for a single chunk.
|
||||
func EstimateChunkL1CommitBlobSize(c *encoding.Chunk) (uint64, error) {
|
||||
metadataSize := uint64(2 + 4*MaxNumChunks) // over-estimate: adding metadata length
|
||||
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
paddedSize := ((metadataSize + chunkDataSize + 30) / 31) * 32
|
||||
return paddedSize, nil
|
||||
}
|
||||
|
||||
// EstimateBatchL1CommitBlobSize estimates the total size of the L1 commit blob for a batch.
|
||||
func EstimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, error) {
|
||||
metadataSize := uint64(2 + 4*MaxNumChunks)
|
||||
var batchDataSize uint64
|
||||
for _, c := range b.Chunks {
|
||||
chunkDataSize, err := chunkL1CommitBlobDataSize(c)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
batchDataSize += chunkDataSize
|
||||
}
|
||||
paddedSize := ((metadataSize + batchDataSize + 30) / 31) * 32
|
||||
return paddedSize, nil
|
||||
}
|
||||
|
||||
func chunkL1CommitBlobDataSize(c *encoding.Chunk) (uint64, error) {
|
||||
var dataSize uint64
|
||||
for _, block := range c.Blocks {
|
||||
for _, tx := range block.Transactions {
|
||||
if tx.Type != types.L1MessageTxType {
|
||||
rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
dataSize += uint64(len(rlpTxData))
|
||||
}
|
||||
}
|
||||
}
|
||||
return dataSize, nil
|
||||
}
|
||||
|
||||
@@ -687,6 +687,51 @@ func TestCodecV1BatchSkipBitmap(t *testing.T) {
|
||||
assert.Equal(t, 42, int(batch.TotalL1MessagePopped))
|
||||
}
|
||||
|
||||
func TestCodecV1ChunkAndBatchBlobSizeEstimation(t *testing.T) {
|
||||
trace2 := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{trace2}}
|
||||
chunk2BlobSize, err := EstimateChunkL1CommitBlobSize(chunk2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(320), chunk2BlobSize)
|
||||
batch2 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
|
||||
batch2BlobSize, err := EstimateBatchL1CommitBlobSize(batch2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(320), batch2BlobSize)
|
||||
|
||||
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
|
||||
chunk3BlobSize, err := EstimateChunkL1CommitBlobSize(chunk3)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(5952), chunk3BlobSize)
|
||||
batch3 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
|
||||
batch3BlobSize, err := EstimateBatchL1CommitBlobSize(batch3)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(5952), batch3BlobSize)
|
||||
|
||||
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
|
||||
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
|
||||
chunk4BlobSize, err := EstimateChunkL1CommitBlobSize(chunk4)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(128), chunk4BlobSize)
|
||||
batch4 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
|
||||
batch4BlobSize, err := EstimateBatchL1CommitBlobSize(batch4)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(128), batch4BlobSize)
|
||||
|
||||
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3}}
|
||||
chunk5BlobSize, err := EstimateChunkL1CommitBlobSize(chunk5)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6176), chunk5BlobSize)
|
||||
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
|
||||
chunk6BlobSize, err := EstimateChunkL1CommitBlobSize(chunk6)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(128), chunk6BlobSize)
|
||||
batch5 := &encoding.Batch{Chunks: []*encoding.Chunk{chunk5, chunk6}}
|
||||
batch5BlobSize, err := EstimateBatchL1CommitBlobSize(batch5)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(6208), batch5BlobSize)
|
||||
}
|
||||
|
||||
func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
|
||||
data, err := os.ReadFile(filename)
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -8,6 +8,17 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// CodecVersion defines the version of encoder and decoder.
|
||||
type CodecVersion int
|
||||
|
||||
const (
|
||||
// CodecV0 represents the version 0 of the encoder and decoder.
|
||||
CodecV0 CodecVersion = iota
|
||||
|
||||
// CodecV1 represents the version 1 of the encoder and decoder.
|
||||
CodecV1
|
||||
)
|
||||
|
||||
// Block represents an L2 block.
|
||||
type Block struct {
|
||||
Header *types.Header
|
||||
@@ -209,8 +220,3 @@ func (b *Batch) WithdrawRoot() common.Hash {
|
||||
lastChunkBlockNum := len(b.Chunks[numChunks-1].Blocks)
|
||||
return b.Chunks[len(b.Chunks)-1].Blocks[lastChunkBlockNum-1].WithdrawRoot
|
||||
}
|
||||
|
||||
// NumChunks gets the number of chunks of the batch.
|
||||
func (b *Batch) NumChunks() uint64 {
|
||||
return uint64(len(b.Chunks))
|
||||
}
|
||||
|
||||
@@ -75,7 +75,6 @@ func TestUtilFunctions(t *testing.T) {
|
||||
assert.Equal(t, uint64(240000), chunk3.L2GasUsed())
|
||||
|
||||
// Test Batch methods
|
||||
assert.Equal(t, uint64(3), batch.NumChunks())
|
||||
assert.Equal(t, block6.Header.Root, batch.StateRoot())
|
||||
assert.Equal(t, block6.WithdrawRoot, batch.WithdrawRoot())
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "v4.3.74"
|
||||
var tag = "v4.3.75"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
|
||||
@@ -159,22 +159,6 @@ func (o *Batch) GetProvingStatusByHash(ctx context.Context, hash string) (types.
|
||||
return types.ProvingStatus(batch.ProvingStatus), nil
|
||||
}
|
||||
|
||||
// GetLatestBatch retrieves the latest batch from the database.
|
||||
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Batch{})
|
||||
db = db.Order("index desc")
|
||||
|
||||
var latestBatch Batch
|
||||
if err := db.First(&latestBatch).Error; err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
|
||||
}
|
||||
return &latestBatch, nil
|
||||
}
|
||||
|
||||
// GetAttemptsByHash get batch attempts by hash. Used by unit test
|
||||
func (o *Batch) GetAttemptsByHash(ctx context.Context, hash string) (int16, int16, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
@@ -187,6 +171,19 @@ func (o *Batch) GetAttemptsByHash(ctx context.Context, hash string) (int16, int1
|
||||
return batch.ActiveAttempts, batch.TotalAttempts, nil
|
||||
}
|
||||
|
||||
// GetBatchByIndex retrieves the batch by the given index.
|
||||
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Batch{})
|
||||
db = db.Where("index = ?", index)
|
||||
|
||||
var batch Batch
|
||||
if err := db.First(&batch).Error; err != nil {
|
||||
return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
|
||||
}
|
||||
return &batch, nil
|
||||
}
|
||||
|
||||
// InsertBatch inserts a new batch into the database.
|
||||
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) {
|
||||
if batch == nil {
|
||||
@@ -207,17 +204,13 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
|
||||
}
|
||||
|
||||
var startChunkIndex uint64
|
||||
parentBatch, err := o.GetLatestBatch(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get latest batch", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
// if parentBatch==nil then err==gorm.ErrRecordNotFound, which means there's
|
||||
// no batch record in the db, we then use default empty values for the creating batch;
|
||||
// if parentBatch!=nil then err==nil, then we fill the parentBatch-related data into the creating batch
|
||||
if parentBatch != nil {
|
||||
if batch.Index > 0 {
|
||||
parentBatch, getErr := o.GetBatchByIndex(ctx, batch.Index-1)
|
||||
if getErr != nil {
|
||||
log.Error("failed to get batch by index", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", getErr)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", getErr)
|
||||
}
|
||||
startChunkIndex = parentBatch.EndChunkIndex + 1
|
||||
}
|
||||
|
||||
|
||||
@@ -161,9 +161,6 @@ func (o *Chunk) getLatestChunk(ctx context.Context) (*Chunk, error) {
|
||||
|
||||
var latestChunk Chunk
|
||||
if err := db.First(&latestChunk).Error; err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Chunk.getLatestChunk error: %w", err)
|
||||
}
|
||||
return &latestChunk, nil
|
||||
@@ -234,7 +231,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
|
||||
var parentChunkHash string
|
||||
var parentChunkStateRoot string
|
||||
parentChunk, err := o.getLatestChunk(ctx)
|
||||
if err != nil {
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
log.Error("failed to get latest chunk", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
@@ -251,7 +248,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
|
||||
|
||||
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
log.Error("failed to initialize new DA chunk", "err", err)
|
||||
log.Error("failed to create DA chunk", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
@@ -261,18 +258,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
|
||||
if err != nil {
|
||||
log.Error("failed to estimate chunk L1 commit calldata size", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(chunk)
|
||||
if err != nil {
|
||||
log.Error("failed to estimate chunk L1 commit gas", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
numBlocks := len(chunk.Blocks)
|
||||
newChunk := Chunk{
|
||||
Index: chunkIndex,
|
||||
@@ -281,10 +266,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
|
||||
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
|
||||
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
|
||||
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
|
||||
TotalL2TxGas: chunk.L2GasUsed(),
|
||||
TotalL2TxNum: chunk.NumL2Transactions(),
|
||||
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
|
||||
TotalL1CommitGas: totalL1CommitGas,
|
||||
StartBlockTime: chunk.Blocks[0].Header.Time,
|
||||
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
|
||||
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
@@ -83,7 +84,7 @@ func action(ctx *cli.Context) error {
|
||||
if err != nil {
|
||||
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, ¶ms.ChainConfig{}, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
|
||||
if err != nil {
|
||||
log.Crit("failed to create new l2 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
@@ -72,18 +72,18 @@ func action(ctx *cli.Context) error {
|
||||
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
|
||||
if err != nil {
|
||||
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
genesisPath := ctx.String(utils.Genesis.Name)
|
||||
genesis, err := config.ReadGenesis(genesisPath)
|
||||
if err != nil {
|
||||
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
|
||||
}
|
||||
|
||||
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
|
||||
if err != nil {
|
||||
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, genesis.Config, db, registry)
|
||||
if err != nil {
|
||||
log.Crit("failed to create chunkProposer", "config file", cfgFile, "error", err)
|
||||
|
||||
@@ -10,11 +10,10 @@
|
||||
"sender_config": {
|
||||
"endpoint": "https://rpc.scroll.io",
|
||||
"escalate_blocks": 1,
|
||||
"confirmations": "0x1",
|
||||
"confirmations": "0x0",
|
||||
"escalate_multiple_num": 2,
|
||||
"escalate_multiple_den": 1,
|
||||
"max_gas_price": 1000000000000,
|
||||
"max_blob_gas_price": 10000000000000,
|
||||
"tx_type": "LegacyTx",
|
||||
"check_pending_time": 1
|
||||
},
|
||||
@@ -35,7 +34,7 @@
|
||||
"sender_config": {
|
||||
"endpoint": "https://rpc.ankr.com/eth",
|
||||
"escalate_blocks": 1,
|
||||
"confirmations": "0x6",
|
||||
"confirmations": "0x0",
|
||||
"escalate_multiple_num": 2,
|
||||
"escalate_multiple_den": 1,
|
||||
"max_gas_price": 1000000000000,
|
||||
|
||||
@@ -3,6 +3,7 @@ package relayer
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"sort"
|
||||
"time"
|
||||
@@ -13,13 +14,17 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/encoding"
|
||||
"scroll-tech/common/types/encoding/codecv0"
|
||||
"scroll-tech/common/types/encoding/codecv1"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
bridgeAbi "scroll-tech/rollup/abi"
|
||||
@@ -61,10 +66,12 @@ type Layer2Relayer struct {
|
||||
chainMonitorClient *resty.Client
|
||||
|
||||
metrics *l2RelayerMetrics
|
||||
|
||||
banachForkHeight uint64
|
||||
}
|
||||
|
||||
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
|
||||
var gasOracleSender, commitSender, finalizeSender *sender.Sender
|
||||
var err error
|
||||
|
||||
@@ -136,6 +143,15 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
cfg: cfg,
|
||||
}
|
||||
|
||||
// If BanachBlock is not set in chain's genesis config, banachForkHeight is inf,
|
||||
// which means rollup-relayer uses codecv0 by default.
|
||||
// TODO: Must change it to real fork name.
|
||||
if chainCfg.BanachBlock != nil {
|
||||
layer2Relayer.banachForkHeight = chainCfg.BanachBlock.Uint64()
|
||||
} else {
|
||||
layer2Relayer.banachForkHeight = math.MaxUint64
|
||||
}
|
||||
|
||||
// chain_monitor client
|
||||
if cfg.ChainMonitor.Enabled {
|
||||
layer2Relayer.chainMonitorClient = resty.New()
|
||||
@@ -189,7 +205,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
|
||||
|
||||
err = r.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
var dbChunk *orm.Chunk
|
||||
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, dbTX)
|
||||
dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, encoding.CodecV0, dbTX)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert chunk: %v", err)
|
||||
}
|
||||
@@ -206,7 +222,7 @@ func (r *Layer2Relayer) initializeGenesis() error {
|
||||
}
|
||||
|
||||
var dbBatch *orm.Batch
|
||||
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, dbTX)
|
||||
dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, dbTX)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert batch: %v", err)
|
||||
}
|
||||
@@ -239,9 +255,9 @@ func (r *Layer2Relayer) initializeGenesis() error {
|
||||
|
||||
func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte, stateRoot common.Hash) error {
|
||||
// encode "importGenesisBatch" transaction calldata
|
||||
calldata, err := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, err)
|
||||
calldata, packErr := r.l1RollupABI.Pack("importGenesisBatch", batchHeader, stateRoot)
|
||||
if packErr != nil {
|
||||
return fmt.Errorf("failed to pack importGenesisBatch with batch header: %v and state root: %v. error: %v", common.Bytes2Hex(batchHeader), stateRoot, packErr)
|
||||
}
|
||||
|
||||
// submit genesis batch to L1 rollup contract
|
||||
@@ -284,8 +300,8 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
|
||||
func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
r.metrics.rollupL2RelayerGasPriceOraclerRunTotal.Inc()
|
||||
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
|
||||
if batch == nil || err != nil {
|
||||
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
|
||||
if err != nil {
|
||||
log.Error("Failed to GetLatestBatch", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -330,95 +346,78 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
|
||||
func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
// get pending batches from database in ascending order by their index.
|
||||
batches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, 5)
|
||||
dbBatches, err := r.batchOrm.GetFailedAndPendingBatches(r.ctx, 5)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch pending L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
for _, batch := range batches {
|
||||
for _, dbBatch := range dbBatches {
|
||||
r.metrics.rollupL2RelayerProcessPendingBatchTotal.Inc()
|
||||
// get current header and parent header.
|
||||
daBatch, err := codecv0.NewDABatchFromBytes(batch.BatchHeader)
|
||||
if err != nil {
|
||||
log.Error("Failed to initialize new DA batch from bytes", "index", batch.Index, "hash", batch.Hash, "err", err)
|
||||
return
|
||||
}
|
||||
parentBatch := &orm.Batch{}
|
||||
if batch.Index > 0 {
|
||||
parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
|
||||
if err != nil {
|
||||
log.Error("Failed to get parent batch header", "index", batch.Index-1, "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
if types.RollupStatus(parentBatch.RollupStatus) == types.RollupCommitFailed {
|
||||
log.Error("Previous batch commit failed, halting further committing",
|
||||
"index", parentBatch.Index, "tx hash", parentBatch.CommitTxHash)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// get the metadata of chunks for the batch
|
||||
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, batch.StartChunkIndex, batch.EndChunkIndex)
|
||||
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch chunks",
|
||||
"start index", batch.StartChunkIndex,
|
||||
"end index", batch.EndChunkIndex, "error", err)
|
||||
log.Error("failed to get chunks in range", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
encodedChunks := make([][]byte, len(dbChunks))
|
||||
chunks := make([]*encoding.Chunk, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
var blocks []*encoding.Block
|
||||
blocks, err = r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch blocks", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
|
||||
blocks, getErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
|
||||
if getErr != nil {
|
||||
log.Error("failed to get blocks in range", "err", getErr)
|
||||
return
|
||||
}
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: blocks,
|
||||
}
|
||||
var daChunk *codecv0.DAChunk
|
||||
daChunk, err = codecv0.NewDAChunk(chunk, c.TotalL1MessagesPoppedBefore)
|
||||
if err != nil {
|
||||
log.Error("Failed to initialize new DA chunk", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
|
||||
return
|
||||
}
|
||||
var daChunkBytes []byte
|
||||
daChunkBytes, err = daChunk.Encode()
|
||||
if err != nil {
|
||||
log.Error("Failed to encode DA chunk", "start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
|
||||
return
|
||||
}
|
||||
encodedChunks[i] = daChunkBytes
|
||||
chunks[i] = &encoding.Chunk{Blocks: blocks}
|
||||
}
|
||||
|
||||
calldata, err := r.l1RollupABI.Pack("commitBatch", daBatch.Version, parentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
|
||||
if err != nil {
|
||||
log.Error("Failed to pack commitBatch", "index", batch.Index, "error", err)
|
||||
if dbBatch.Index == 0 {
|
||||
log.Error("invalid args: batch index is 0, should only happen in committing genesis batch")
|
||||
return
|
||||
}
|
||||
|
||||
// send transaction
|
||||
fallbackGasLimit := uint64(float64(batch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
|
||||
if types.RollupStatus(batch.RollupStatus) == types.RollupCommitFailed {
|
||||
// use eth_estimateGas if this batch has been committed failed.
|
||||
fallbackGasLimit = 0
|
||||
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", batch.Hash)
|
||||
dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
|
||||
if getErr != nil {
|
||||
log.Error("failed to get parent batch header", "err", getErr)
|
||||
return
|
||||
}
|
||||
txHash, err := r.commitSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, calldata, nil, fallbackGasLimit)
|
||||
|
||||
var calldata []byte
|
||||
var blob *kzg4844.Blob
|
||||
if dbChunks[0].StartBlockNumber < r.banachForkHeight { // codecv0
|
||||
calldata, err = r.constructCommitBatchPayloadCodecV0(dbBatch, dbParentBatch, dbChunks, chunks)
|
||||
if err != nil {
|
||||
log.Error("failed to construct commitBatch payload codecv0", "index", dbBatch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
} else { // codecv1
|
||||
calldata, blob, err = r.constructCommitBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks)
|
||||
if err != nil {
|
||||
log.Error("failed to construct commitBatch payload codecv1", "index", dbBatch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// fallbackGasLimit is non-zero only in sending non-blob transactions.
|
||||
fallbackGasLimit := uint64(float64(dbBatch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
|
||||
if types.RollupStatus(dbBatch.RollupStatus) == types.RollupCommitFailed {
|
||||
// use eth_estimateGas if this batch has been committed and failed at least once.
|
||||
fallbackGasLimit = 0
|
||||
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", dbBatch.Hash)
|
||||
}
|
||||
|
||||
txHash, err := r.commitSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, blob, fallbackGasLimit)
|
||||
if err != nil {
|
||||
log.Error(
|
||||
"Failed to send commitBatch tx to layer1",
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
"index", dbBatch.Index,
|
||||
"hash", dbBatch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"err", err,
|
||||
)
|
||||
log.Debug(
|
||||
"Failed to send commitBatch tx to layer1",
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
"index", dbBatch.Index,
|
||||
"hash", dbBatch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"calldata", common.Bytes2Hex(calldata),
|
||||
"err", err,
|
||||
@@ -426,13 +425,13 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
return
|
||||
}
|
||||
|
||||
err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, batch.Hash, txHash.String(), types.RollupCommitting)
|
||||
err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupCommitting)
|
||||
if err != nil {
|
||||
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
|
||||
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", dbBatch.Hash, "index", dbBatch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Inc()
|
||||
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
|
||||
log.Info("Sent the commitBatch tx to layer1", "batch index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -501,103 +500,98 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) finalizeBatch(batch *orm.Batch, withProof bool) error {
|
||||
func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error {
|
||||
// Check batch status before send `finalizeBatch` tx.
|
||||
if r.cfg.ChainMonitor.Enabled {
|
||||
var batchStatus bool
|
||||
batchStatus, err := r.getBatchStatusByIndex(batch)
|
||||
batchStatus, err := r.getBatchStatusByIndex(dbBatch)
|
||||
if err != nil {
|
||||
r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
|
||||
log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
|
||||
log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", dbBatch.Index, "err", err)
|
||||
return err
|
||||
}
|
||||
if !batchStatus {
|
||||
r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
|
||||
log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
|
||||
log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", dbBatch.Index)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var parentBatchStateRoot string
|
||||
if batch.Index > 0 {
|
||||
var parentBatch *orm.Batch
|
||||
parentBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
|
||||
// handle unexpected db error
|
||||
if err != nil {
|
||||
log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
|
||||
return err
|
||||
}
|
||||
parentBatchStateRoot = parentBatch.StateRoot
|
||||
if dbBatch.Index == 0 {
|
||||
return fmt.Errorf("invalid args: batch index is 0, should only happen in finalizing genesis batch")
|
||||
}
|
||||
|
||||
var txCalldata []byte
|
||||
dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
|
||||
if getErr != nil {
|
||||
return fmt.Errorf("failed to get batch, index: %d, err: %w", dbBatch.Index-1, getErr)
|
||||
}
|
||||
|
||||
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch chunks: %w", err)
|
||||
}
|
||||
|
||||
var aggProof *message.BatchProof
|
||||
if withProof {
|
||||
aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, batch.Hash)
|
||||
if err != nil {
|
||||
log.Error("get verified proof by hash failed", "hash", batch.Hash, "err", err)
|
||||
return err
|
||||
aggProof, getErr = r.batchOrm.GetVerifiedProofByHash(r.ctx, dbBatch.Hash)
|
||||
if getErr != nil {
|
||||
return fmt.Errorf("failed to get verified proof by hash, index: %d, err: %w", dbBatch.Index, getErr)
|
||||
}
|
||||
|
||||
if err = aggProof.SanityCheck(); err != nil {
|
||||
log.Error("agg_proof sanity check fails", "hash", batch.Hash, "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
txCalldata, err = r.l1RollupABI.Pack(
|
||||
"finalizeBatchWithProof",
|
||||
batch.BatchHeader,
|
||||
common.HexToHash(parentBatchStateRoot),
|
||||
common.HexToHash(batch.StateRoot),
|
||||
common.HexToHash(batch.WithdrawRoot),
|
||||
aggProof.Proof,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error("Pack finalizeBatchWithProof failed", "err", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
txCalldata, err = r.l1RollupABI.Pack(
|
||||
"finalizeBatch",
|
||||
batch.BatchHeader,
|
||||
common.HexToHash(parentBatchStateRoot),
|
||||
common.HexToHash(batch.StateRoot),
|
||||
common.HexToHash(batch.WithdrawRoot),
|
||||
)
|
||||
if err != nil {
|
||||
log.Error("Pack finalizeBatch failed", "err", err)
|
||||
return err
|
||||
return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", dbBatch.Index, err)
|
||||
}
|
||||
}
|
||||
|
||||
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
|
||||
txHash, err := r.finalizeSender.SendTransaction(batch.Hash, &r.cfg.RollupContractAddress, txCalldata, nil, 0)
|
||||
finalizeTxHash := &txHash
|
||||
var calldata []byte
|
||||
if dbChunks[0].StartBlockNumber < r.banachForkHeight { // codecv0
|
||||
calldata, err = r.constructFinalizeBatchPayloadCodecV0(dbBatch, dbParentBatch, aggProof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct commitBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err)
|
||||
}
|
||||
} else { // codecv1
|
||||
chunks := make([]*encoding.Chunk, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
|
||||
if dbErr != nil {
|
||||
return fmt.Errorf("failed to fetch blocks: %w", dbErr)
|
||||
}
|
||||
chunks[i] = &encoding.Chunk{Blocks: blocks}
|
||||
}
|
||||
|
||||
calldata, err = r.constructFinalizeBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks, aggProof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct commitBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err)
|
||||
}
|
||||
}
|
||||
|
||||
txHash, err := r.finalizeSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0)
|
||||
if err != nil {
|
||||
log.Error(
|
||||
"finalizeBatch in layer1 failed",
|
||||
"with proof", withProof,
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
"index", dbBatch.Index,
|
||||
"hash", dbBatch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"err", err,
|
||||
)
|
||||
log.Debug(
|
||||
"finalizeBatch in layer1 failed",
|
||||
"with proof", withProof,
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
"index", dbBatch.Index,
|
||||
"hash", dbBatch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"calldata", common.Bytes2Hex(txCalldata),
|
||||
"calldata", common.Bytes2Hex(calldata),
|
||||
"err", err,
|
||||
)
|
||||
return err
|
||||
}
|
||||
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", batch.Index, "batch hash", batch.Hash, "tx hash", batch.Hash)
|
||||
|
||||
log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash)
|
||||
|
||||
// record and sync with db, @todo handle db error
|
||||
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch.Hash, finalizeTxHash.String(), types.RollupFinalizing); err != nil {
|
||||
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", batch.Index, "batch hash", batch.Hash, "tx hash", finalizeTxHash.String(), "err", err)
|
||||
if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupFinalizing); err != nil {
|
||||
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err)
|
||||
return err
|
||||
}
|
||||
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
|
||||
@@ -729,6 +723,140 @@ func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, error) {
|
||||
daBatch, err := codecv0.NewDABatchFromBytes(dbBatch.BatchHeader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create DA batch from bytes: %w", err)
|
||||
}
|
||||
|
||||
encodedChunks := make([][]byte, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
daChunk, createErr := codecv0.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
|
||||
if createErr != nil {
|
||||
return nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
|
||||
}
|
||||
daChunkBytes, encodeErr := daChunk.Encode()
|
||||
if encodeErr != nil {
|
||||
return nil, fmt.Errorf("failed to encode DA chunk: %w", encodeErr)
|
||||
}
|
||||
encodedChunks[i] = daChunkBytes
|
||||
}
|
||||
|
||||
calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV1(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
|
||||
batch := &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
TotalL1MessagePoppedBefore: dbChunks[len(dbChunks)-1].TotalL1MessagesPoppedBefore + dbChunks[len(dbChunks)-1].TotalL1MessagesPoppedInChunk,
|
||||
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
|
||||
Chunks: chunks,
|
||||
}
|
||||
|
||||
daBatch, createErr := codecv1.NewDABatch(batch)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
|
||||
}
|
||||
|
||||
encodedChunks := make([][]byte, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
daChunk, createErr := codecv1.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
|
||||
}
|
||||
encodedChunks[i] = daChunk.Encode()
|
||||
}
|
||||
|
||||
calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version, dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap)
|
||||
if packErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
|
||||
}
|
||||
return calldata, daBatch.Blob(), nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, aggProof *message.BatchProof) ([]byte, error) {
|
||||
if aggProof != nil { // finalizeBatch with proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBatchWithProof",
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbParentBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
aggProof.Proof,
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBatchWithProof: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
// finalizeBatch without proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBatch",
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbParentBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBatch: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV1(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk, aggProof *message.BatchProof) ([]byte, error) {
|
||||
batch := &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
TotalL1MessagePoppedBefore: dbChunks[len(dbChunks)-1].TotalL1MessagesPoppedBefore + dbChunks[len(dbChunks)-1].TotalL1MessagesPoppedInChunk,
|
||||
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
|
||||
Chunks: chunks,
|
||||
}
|
||||
|
||||
daBatch, createErr := codecv1.NewDABatch(batch)
|
||||
if createErr != nil {
|
||||
return nil, fmt.Errorf("failed to create DA batch: %w", createErr)
|
||||
}
|
||||
|
||||
blobDataProof, getErr := daBatch.BlobDataProof()
|
||||
if getErr != nil {
|
||||
return nil, fmt.Errorf("failed to get blob data proof: %w", getErr)
|
||||
}
|
||||
|
||||
if aggProof != nil { // finalizeBatch4844 with proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBatchWithProof4844",
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbParentBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
blobDataProof,
|
||||
aggProof.Proof,
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBatchWithProof4844: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
// finalizeBatch4844 without proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBatch4844", // Assuming the "to be implemented" bypass function name is finalizeBatch4844.
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbParentBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
blobDataProof,
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBatch4844: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
|
||||
// for unit test
|
||||
func (r *Layer2Relayer) StopSenders() {
|
||||
|
||||
@@ -11,7 +11,9 @@ import (
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
@@ -40,128 +42,173 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
|
||||
func testCreateNewRelayer(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, ¶ms.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
defer relayer.StopSenders()
|
||||
}
|
||||
|
||||
func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer relayer.StopSenders()
|
||||
l2Cfg := cfg.L2Config
|
||||
chainConfig := ¶ms.ChainConfig{}
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
chainConfig.BanachBlock = big.NewInt(0)
|
||||
}
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1)
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2)
|
||||
assert.NoError(t, err)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBatches()
|
||||
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupCommitting, statuses[0])
|
||||
relayer.StopSenders()
|
||||
patchGuard.Reset()
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBatches()
|
||||
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupCommitting, statuses[0])
|
||||
}
|
||||
|
||||
func testL2RelayerProcessCommittedBatches(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer relayer.StopSenders()
|
||||
l2Cfg := cfg.L2Config
|
||||
chainConfig := ¶ms.ChainConfig{}
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
chainConfig.BanachBlock = big.NewInt(0)
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessCommittedBatches()
|
||||
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
// no valid proof, rollup status remains the same
|
||||
assert.Equal(t, types.RollupCommitted, statuses[0])
|
||||
|
||||
proof := &message.BatchProof{
|
||||
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
}
|
||||
err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessCommittedBatches()
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupFinalizing, statuses[0])
|
||||
relayer.StopSenders()
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessCommittedBatches()
|
||||
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
// no valid proof, rollup status remains the same
|
||||
assert.Equal(t, types.RollupCommitted, statuses[0])
|
||||
|
||||
proof := &message.BatchProof{
|
||||
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
}
|
||||
err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessCommittedBatches()
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupFinalizing, statuses[0])
|
||||
}
|
||||
|
||||
func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
|
||||
l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer relayer.StopSenders()
|
||||
l2Cfg := cfg.L2Config
|
||||
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
|
||||
l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
|
||||
chainConfig := ¶ms.ChainConfig{}
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
chainConfig.BanachBlock = big.NewInt(0)
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
relayer.ProcessCommittedBatches()
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
|
||||
})
|
||||
assert.True(t, ok)
|
||||
relayer.StopSenders()
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
relayer.ProcessCommittedBatches()
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
@@ -172,7 +219,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
@@ -182,13 +229,13 @@ func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
batchHashes := make([]string, len(isSuccessful))
|
||||
for i := range batchHashes {
|
||||
batch := &encoding.Batch{
|
||||
Index: uint64(i),
|
||||
Index: uint64(i + 1),
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
|
||||
assert.NoError(t, err)
|
||||
batchHashes[i] = dbBatch.Hash
|
||||
}
|
||||
@@ -228,7 +275,7 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()

@@ -238,13 +285,13 @@ func testL2RelayerFinalizeConfirm(t *testing.T) {
batchHashes := make([]string, len(isSuccessful))
for i := range batchHashes {
batch := &encoding.Batch{
Index: uint64(i),
Index: uint64(i + 1),
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}

dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)
batchHashes[i] = dbBatch.Hash
}
@@ -288,7 +335,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
}

batchOrm := orm.NewBatch(db)
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1)
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV0)
assert.NoError(t, err)

batch2 := &encoding.Batch{
@@ -298,14 +345,14 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
Chunks: []*encoding.Chunk{chunk2},
}

dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2)
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV0)
assert.NoError(t, err)

// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, ServiceTypeL2GasOracle, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()

@@ -345,7 +392,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2GasOracle, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
@@ -440,32 +487,32 @@ func testGetBatchStatusByIndex(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1)
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV0)
assert.NoError(t, err)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2)
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV0)
assert.NoError(t, err)

batch := &encoding.Batch{
Index: 0,
Index: 1,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1, chunk2},
}

batchOrm := orm.NewBatch(db)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch)
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)

cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()

status, err := relayer.getBatchStatusByIndex(dbBatch)
assert.NoError(t, err)
assert.Equal(t, true, status)

@@ -15,6 +15,7 @@ import (

"scroll-tech/common/database"
"scroll-tech/common/docker"
dockercompose "scroll-tech/common/docker-compose/l1"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"

@@ -25,7 +26,8 @@ var (
// config
cfg *config.Config

base *docker.App
base *docker.App
posL1TestEnv *dockercompose.PoSL1TestEnv

// l2geth client
l2Cli *ethclient.Client
@@ -51,9 +53,10 @@ func setupEnv(t *testing.T) {
cfg, err = config.NewConfig("../../../conf/config.json")
assert.NoError(t, err)

base.RunImages(t)
base.RunL2Geth(t)
base.RunDBImage(t)

cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L2gethImg.Endpoint()
cfg.DBConfig = &database.Config{
DSN: base.DBConfig.DSN,
@@ -95,10 +98,19 @@ func setupEnv(t *testing.T) {

func TestMain(m *testing.M) {
base = docker.NewDockerApp()
base.Free()

var err error
posL1TestEnv, err = dockercompose.NewPoSL1TestEnv()
if err != nil {
log.Crit("failed to create PoS L1 test environment", "err", err)
}
if err := posL1TestEnv.Start(); err != nil {
log.Crit("failed to start PoS L1 test environment", "err", err)
}
defer posL1TestEnv.Stop()

m.Run()

base.Free()
}

func TestFunctions(t *testing.T) {

@@ -37,9 +37,6 @@ const (

// DynamicFeeTxType type for DynamicFeeTx
DynamicFeeTxType = "DynamicFeeTx"

// BlobTxType type for BlobTx
BlobTxType = "BlobTx"
)

// Confirmation struct used to indicate transaction confirmation details
@@ -163,8 +160,9 @@ func (s *Sender) getFeeData(target *common.Address, data []byte, sidecar *gethTy
case LegacyTxType:
return s.estimateLegacyGas(target, data, fallbackGasLimit)
case DynamicFeeTxType:
return s.estimateDynamicGas(target, data, baseFee, fallbackGasLimit)
case BlobTxType:
if sidecar == nil {
return s.estimateDynamicGas(target, data, baseFee, fallbackGasLimit)
}
return s.estimateBlobGas(target, data, sidecar, baseFee, blobBaseFee, fallbackGasLimit)
default:
return nil, fmt.Errorf("unsupported transaction type: %s", s.config.TxType)
@@ -181,7 +179,7 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
err error
)

if s.config.TxType == BlobTxType {
if blob != nil {
sidecar, err = makeSidecar(blob)
if err != nil {
log.Error("failed to make sidecar for blob transaction", "error", err)
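As the hunks above show, a sender only builds a sidecar when a blob is actually supplied; with a nil blob (and therefore a nil sidecar), gas estimation falls back to the dynamic-fee path. A hedged usage sketch, with placeholder context IDs and target address:

// With a blob: makeSidecar wraps it and a blob-carrying transaction is estimated and sent.
hash, err := s.SendTransaction("commit-1", &target, calldata, blob, 0)
// Without a blob: the same sender issues an ordinary dynamic-fee transaction instead.
hash, err = s.SendTransaction("oracle-1", &target, calldata, nil, 0)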
@@ -235,39 +233,36 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, data
Data: data,
}
case DynamicFeeTxType:
txData = &gethTypes.DynamicFeeTx{
Nonce: nonce,
To: target,
Data: data,
Gas: feeData.gasLimit,
AccessList: feeData.accessList,
ChainID: s.chainID,
GasTipCap: feeData.gasTipCap,
GasFeeCap: feeData.gasFeeCap,
}
case BlobTxType:
if target == nil {
log.Error("blob transaction to address cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction to address cannot be nil")
}

if sidecar == nil {
log.Error("blob transaction sidecar cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction sidecar cannot be nil")
}
txData = &gethTypes.DynamicFeeTx{
Nonce: nonce,
To: target,
Data: data,
Gas: feeData.gasLimit,
AccessList: feeData.accessList,
ChainID: s.chainID,
GasTipCap: feeData.gasTipCap,
GasFeeCap: feeData.gasFeeCap,
}
} else {
if target == nil {
log.Error("blob transaction to address cannot be nil", "address", s.auth.From.String(), "chainID", s.chainID.Uint64(), "nonce", s.auth.Nonce.Uint64())
return nil, errors.New("blob transaction to address cannot be nil")
}

txData = &gethTypes.BlobTx{
ChainID: uint256.MustFromBig(s.chainID),
Nonce: nonce,
GasTipCap: uint256.MustFromBig(feeData.gasTipCap),
GasFeeCap: uint256.MustFromBig(feeData.gasFeeCap),
Gas: feeData.gasLimit,
To: *target,
Data: data,
AccessList: feeData.accessList,
BlobFeeCap: uint256.MustFromBig(feeData.blobGasFeeCap),
BlobHashes: sidecar.BlobHashes(),
Sidecar: sidecar,
txData = &gethTypes.BlobTx{
ChainID: uint256.MustFromBig(s.chainID),
Nonce: nonce,
GasTipCap: uint256.MustFromBig(feeData.gasTipCap),
GasFeeCap: uint256.MustFromBig(feeData.gasFeeCap),
Gas: feeData.gasLimit,
To: *target,
Data: data,
AccessList: feeData.accessList,
BlobFeeCap: uint256.MustFromBig(feeData.blobGasFeeCap),
BlobHashes: sidecar.BlobHashes(),
Sidecar: sidecar,
}
}
}
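Condensed, the blob branch of createAndSendTx reduces to the following shape (a sketch reusing the repo's makeSidecar helper and the field names from the hunk above; error handling elided):

// Build the sidecar (commitments, proofs, blob hashes) from the raw blob.
sidecar, err := makeSidecar(blob)
if err != nil {
return nil, err
}
txData := &gethTypes.BlobTx{
ChainID: uint256.MustFromBig(s.chainID),
Nonce: nonce,
GasTipCap: uint256.MustFromBig(feeData.gasTipCap),
GasFeeCap: uint256.MustFromBig(feeData.gasFeeCap),
Gas: feeData.gasLimit,
To: *target, // target must be non-nil for blob transactions
Data: data,
AccessList: feeData.accessList,
BlobFeeCap: uint256.MustFromBig(feeData.blobGasFeeCap),
BlobHashes: sidecar.BlobHashes(),
Sidecar: sidecar,
}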
@@ -357,94 +352,95 @@ func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBas
txInfo["adjusted_gas_price"] = gasPrice.Uint64()

case DynamicFeeTxType:
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
if tx.BlobTxSidecar() == nil {
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()

gasTipCap := new(big.Int).Mul(originalGasTipCap, escalateMultipleNum)
gasTipCap = new(big.Int).Div(gasTipCap, escalateMultipleDen)
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, escalateMultipleNum)
gasFeeCap = new(big.Int).Div(gasFeeCap, escalateMultipleDen)
gasTipCap := new(big.Int).Mul(originalGasTipCap, escalateMultipleNum)
gasTipCap = new(big.Int).Div(gasTipCap, escalateMultipleDen)
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, escalateMultipleNum)
gasFeeCap = new(big.Int).Div(gasFeeCap, escalateMultipleDen)

// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}

// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}

// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}

if originalGasTipCap.Cmp(gasTipCap) == 0 {
log.Warn("gas tip cap bump corner case, add 1 wei", "original", originalGasTipCap.Uint64(), "adjusted", gasTipCap.Uint64())
gasTipCap = new(big.Int).Add(gasTipCap, big.NewInt(1))
}

if originalGasFeeCap.Cmp(gasFeeCap) == 0 {
log.Warn("gas fee cap bump corner case, add 1 wei", "original", originalGasFeeCap.Uint64(), "adjusted", gasFeeCap.Uint64())
gasFeeCap = new(big.Int).Add(gasFeeCap, big.NewInt(1))
}

feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
} else {
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
originalBlobGasFeeCap := tx.BlobGasFeeCap()

// bumping at least 100%
gasTipCap := new(big.Int).Mul(originalGasTipCap, big.NewInt(2))
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, big.NewInt(2))
blobGasFeeCap := new(big.Int).Mul(originalBlobGasFeeCap, big.NewInt(2))

// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}

// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}

// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}

// adjust for rising blobbasefee
currentBlobGasFeeCap := getBlobGasFeeCap(new(big.Int).SetUint64(blobBaseFee))
if blobGasFeeCap.Cmp(currentBlobGasFeeCap) < 0 {
blobGasFeeCap = currentBlobGasFeeCap
}

// but don't exceed maxBlobGasPrice
if blobGasFeeCap.Cmp(maxBlobGasPrice) > 0 {
blobGasFeeCap = maxBlobGasPrice
}

feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
feeData.blobGasFeeCap = blobGasFeeCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
txInfo["original_blob_gas_fee_cap"] = originalBlobGasFeeCap.Uint64()
txInfo["adjusted_blob_gas_fee_cap"] = blobGasFeeCap.Uint64()
}

// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}

// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}

if originalGasTipCap.Cmp(gasTipCap) == 0 {
log.Warn("gas tip cap bump corner case, add 1 wei", "original", originalGasTipCap.Uint64(), "adjusted", gasTipCap.Uint64())
gasTipCap = new(big.Int).Add(gasTipCap, big.NewInt(1))
}

if originalGasFeeCap.Cmp(gasFeeCap) == 0 {
log.Warn("gas fee cap bump corner case, add 1 wei", "original", originalGasFeeCap.Uint64(), "adjusted", gasFeeCap.Uint64())
gasFeeCap = new(big.Int).Add(gasFeeCap, big.NewInt(1))
}

feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()

case BlobTxType:
originalGasTipCap := tx.GasTipCap()
originalGasFeeCap := tx.GasFeeCap()
originalBlobGasFeeCap := tx.BlobGasFeeCap()

// bumping at least 100%
gasTipCap := new(big.Int).Mul(originalGasTipCap, big.NewInt(2))
gasFeeCap := new(big.Int).Mul(originalGasFeeCap, big.NewInt(2))
blobGasFeeCap := new(big.Int).Mul(originalBlobGasFeeCap, big.NewInt(2))

// adjust for rising basefee
currentGasFeeCap := getGasFeeCap(new(big.Int).SetUint64(baseFee), gasTipCap)
if gasFeeCap.Cmp(currentGasFeeCap) < 0 {
gasFeeCap = currentGasFeeCap
}

// but don't exceed maxGasPrice
if gasFeeCap.Cmp(maxGasPrice) > 0 {
gasFeeCap = maxGasPrice
}

// gasTipCap <= gasFeeCap
if gasTipCap.Cmp(gasFeeCap) > 0 {
gasTipCap = gasFeeCap
}

// adjust for rising blobbasefee
currentBlobGasFeeCap := getBlobGasFeeCap(new(big.Int).SetUint64(blobBaseFee))
if blobGasFeeCap.Cmp(currentBlobGasFeeCap) < 0 {
blobGasFeeCap = currentBlobGasFeeCap
}

// but don't exceed maxBlobGasPrice
if blobGasFeeCap.Cmp(maxBlobGasPrice) > 0 {
blobGasFeeCap = maxBlobGasPrice
}

feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
feeData.blobGasFeeCap = blobGasFeeCap
txInfo["original_gas_tip_cap"] = originalGasTipCap.Uint64()
txInfo["adjusted_gas_tip_cap"] = gasTipCap.Uint64()
txInfo["original_gas_fee_cap"] = originalGasFeeCap.Uint64()
txInfo["adjusted_gas_fee_cap"] = gasFeeCap.Uint64()
txInfo["original_blob_gas_fee_cap"] = originalBlobGasFeeCap.Uint64()
txInfo["adjusted_blob_gas_fee_cap"] = blobGasFeeCap.Uint64()

default:
return nil, fmt.Errorf("unsupported transaction type: %s", s.config.TxType)
}
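A concrete example of the two escalation paths above, using the test configuration EscalateMultipleNum=110 and EscalateMultipleDen=100: a plain dynamic-fee transaction with a 10 gwei tip cap is resubmitted at 10 * 110 / 100 = 11 gwei, then clamped to maxGasPrice, raised to the current base-fee-derived cap if needed, and bumped by 1 wei if the division rounds back to the original value. A transaction carrying a blob sidecar instead doubles gas tip cap, gas fee cap, and blob fee cap, presumably because replacing a pending blob transaction requires a much larger price bump than the default 10% threshold.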
@@ -45,7 +45,8 @@ var (
cfg *config.Config
base *docker.App
posL1TestEnv *dockercompose.PoSL1TestEnv
txTypes = []string{"LegacyTx", "DynamicFeeTx", "BlobTx"}
txTypes = []string{"LegacyTx", "DynamicFeeTx", "DynamicFeeTx"}
txBlob = []*kzg4844.Blob{nil, nil, randBlob()}
txUint8Types = []uint8{0, 2, 3}
db *gorm.DB
testContractsAddress common.Address
@@ -80,7 +81,7 @@ func setupEnv(t *testing.T) {
assert.NoError(t, err)
privateKey = priv

cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = posL1TestEnv.Endpoint()

base.RunDBImage(t)
db, err = database.InitDB(
@@ -110,7 +111,7 @@ func setupEnv(t *testing.T) {

testContractsAddress = crypto.CreateAddress(auth.From, nonce)

tx := gethTypes.NewContractCreation(nonce, big.NewInt(0), 1000000, big.NewInt(10000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
tx := gethTypes.NewContractCreation(nonce, big.NewInt(0), 10000000, big.NewInt(10000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
signedTx, err := auth.Signer(auth.From, tx)
assert.NoError(t, err)
err = l1Client.SendTransaction(context.Background(), signedTx)
@@ -158,14 +159,14 @@ func testNewSender(t *testing.T) {
assert.NoError(t, migrate.ResetDB(sqlDB))

// exit by Stop()
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy1 := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy1.TxType = txType
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
newSender1.Stop()

// exit by ctx.Done()
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy2 := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy2.TxType = txType
subCtx, cancel := context.WithCancel(context.Background())
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
@@ -180,12 +181,12 @@ func testSendAndRetrieveTransaction(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)

hash, err := s.SendTransaction("0", &common.Address{}, nil, randBlob(), 0)
hash, err := s.SendTransaction("0", &common.Address{}, nil, txBlob[i], 0)
assert.NoError(t, err)
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
assert.NoError(t, err)
@@ -210,12 +211,12 @@ func testSendAndRetrieveTransaction(t *testing.T) {
}

func testFallbackGasLimit(t *testing.T) {
for _, txType := range txTypes {
for i, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
@@ -225,7 +226,7 @@ func testFallbackGasLimit(t *testing.T) {
assert.NoError(t, err)

// FallbackGasLimit = 0
txHash0, err := s.SendTransaction("0", &common.Address{}, nil, randBlob(), 0)
txHash0, err := s.SendTransaction("0", &common.Address{}, nil, txBlob[i], 0)
assert.NoError(t, err)
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
assert.NoError(t, err)
@@ -245,7 +246,7 @@ func testFallbackGasLimit(t *testing.T) {
},
)

txHash1, err := s.SendTransaction("1", &common.Address{}, nil, randBlob(), 100000)
txHash1, err := s.SendTransaction("1", &common.Address{}, nil, txBlob[i], 100000)
assert.NoError(t, err)
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
assert.NoError(t, err)
@@ -263,8 +264,8 @@ func testFallbackGasLimit(t *testing.T) {
}

func testResubmitZeroGasPriceTransaction(t *testing.T) {
for _, txType := range txTypes {
if txType == BlobTxType {
for i, txType := range txTypes {
if txBlob[i] != nil {
continue
}

@@ -272,7 +273,7 @@ func testResubmitZeroGasPriceTransaction(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
@@ -312,7 +313,7 @@ func testAccessListTransactionGasLimit(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
@@ -323,16 +324,20 @@ func testAccessListTransactionGasLimit(t *testing.T) {
data, err := l2GasOracleABI.Pack("setL2BaseFee", big.NewInt(int64(i+1)))
assert.NoError(t, err)

sidecar, err := makeSidecar(randBlob())
assert.NoError(t, err)
var sidecar *gethTypes.BlobTxSidecar
if txBlob[i] != nil {
sidecar, err = makeSidecar(txBlob[i])
assert.NoError(t, err)
}

gasLimit, accessList, err := s.estimateGasLimit(&testContractsAddress, data, sidecar, nil, big.NewInt(1000000000), big.NewInt(1000000000), big.NewInt(1000000000))
assert.NoError(t, err)

if txType == LegacyTxType { // Legacy transactions can not have an access list.
assert.Equal(t, uint64(43935), gasLimit)
assert.Equal(t, uint64(43956), gasLimit)
assert.Nil(t, accessList)
} else { // Dynamic fee and blob transactions can have an access list.
assert.Equal(t, uint64(43458), gasLimit)
assert.Equal(t, uint64(43479), gasLimit)
assert.NotNil(t, accessList)
}

@@ -341,12 +346,12 @@ func testAccessListTransactionGasLimit(t *testing.T) {
}

func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
for _, txType := range txTypes {
for i, txType := range txTypes {
sqlDB, err := db.DB()
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
// Bump gas price, gas tip cap and gas fee cap just touch the minimum threshold of 10% (default config of geth).
cfgCopy.EscalateMultipleNum = 110
cfgCopy.EscalateMultipleDen = 100
@@ -360,8 +365,11 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
blobGasFeeCap: big.NewInt(1000000000),
gasLimit: 50000,
}
sidecar, err := makeSidecar(randBlob())
assert.NoError(t, err)
var sidecar *gethTypes.BlobTxSidecar
if txBlob[i] != nil {
sidecar, err = makeSidecar(txBlob[i])
assert.NoError(t, err)
}
tx, err := s.createAndSendTx(feeData, &common.Address{}, nil, sidecar, nil)
assert.NoError(t, err)
assert.NotNil(t, tx)
@@ -383,8 +391,8 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
}

func testResubmitUnderpricedTransaction(t *testing.T) {
for _, txType := range txTypes {
if txType == BlobTxType {
for i, txType := range txTypes {
if txBlob[i] != nil {
continue
}

@@ -392,7 +400,7 @@ func testResubmitUnderpricedTransaction(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
// Bump gas price, gas tip cap and gas fee cap less than 10% (default config of geth).
cfgCopy.EscalateMultipleNum = 109
cfgCopy.EscalateMultipleDen = 100
@@ -431,7 +439,7 @@ func testResubmitDynamicFeeTransactionWithRisingBaseFee(t *testing.T) {
assert.NoError(t, migrate.ResetDB(sqlDB))

txType := "DynamicFeeTx"
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType

s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
@@ -473,9 +481,8 @@ func testResubmitBlobTransactionWithRisingBaseFeeAndBlobBaseFee(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

txType := "BlobTx"
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = DynamicFeeTxType

s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeUnknown, db, nil)
assert.NoError(t, err)
@@ -531,7 +538,7 @@ func testCheckPendingTransactionTxConfirmed(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeCommitBatch, db, nil)
assert.NoError(t, err)
@@ -572,7 +579,7 @@ func testCheckPendingTransactionResubmitTxConfirmed(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.EscalateBlocks = 0
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeFinalizeBatch, db, nil)
@@ -632,7 +639,7 @@ func testCheckPendingTransactionReplacedTxConfirmed(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.EscalateBlocks = 0
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeL1GasOracle, db, nil)
@@ -702,7 +709,7 @@ func testCheckPendingTransactionTxMultipleTimesWithOnlyOneTxPending(t *testing.T
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(sqlDB))

cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.EscalateBlocks = 0
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeCommitBatch, db, nil)
@@ -777,8 +784,8 @@ func testBlobTransactionWithBlobhashOpContractCall(t *testing.T) {
)
assert.NoError(t, err)

cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = BlobTxType
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
cfgCopy.TxType = DynamicFeeTxType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", types.SenderTypeL1GasOracle, db, nil)
assert.NoError(t, err)
defer s.Stop()

@@ -3,6 +3,7 @@ package watcher
import (
"context"
"fmt"
"math"
"time"

"github.com/prometheus/client_golang/prometheus"
@@ -14,10 +15,10 @@ import (

"scroll-tech/common/forks"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)

// BatchProposer proposes batches based on available unbatched chunks.
@@ -35,6 +36,7 @@ type BatchProposer struct {
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
forkMap map[uint64]bool
banachForkHeight uint64

batchProposerCircleTotal prometheus.Counter
proposeBatchFailureTotal prometheus.Counter
@@ -42,6 +44,7 @@ type BatchProposer struct {
proposeBatchUpdateInfoFailureTotal prometheus.Counter
totalL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalL1CommitBlobSize prometheus.Gauge
batchChunksNum prometheus.Gauge
batchFirstBlockTimeoutReached prometheus.Counter
batchChunksProposeNotEnoughTotal prometheus.Counter
@@ -58,7 +61,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"forkHeights", forkHeights)

return &BatchProposer{
p := &BatchProposer{
ctx: ctx,
db: db,
batchOrm: orm.NewBatch(db),
@@ -95,6 +98,10 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
Name: "rollup_propose_batch_total_l1_call_data_size",
Help: "The total l1 call data size",
}),
totalL1CommitBlobSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_total_l1_commit_blob_size",
Help: "The total l1 commit blob size",
}),
batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
@@ -108,22 +115,31 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
Help: "Total number of batch chunk propose not enough",
}),
}

// If BanachBlock is not set in chain's genesis config, banachForkHeight is inf,
// which means batch-proposer uses codecv0 by default.
// TODO: Must change it to real fork name.
if chainCfg.BanachBlock != nil {
p.banachForkHeight = chainCfg.BanachBlock.Uint64()
} else {
p.banachForkHeight = math.MaxUint64
}
return p
}
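The fork height captured above is what later drives codec selection in proposeBatch: chunks whose first block is at or past banachForkHeight are encoded with codecv1, everything earlier stays on codecv0. In condensed form (taken from the proposeBatch hunk further down):

codecVersion := encoding.CodecV0
if dbChunks[0].StartBlockNumber >= p.banachForkHeight {
codecVersion = encoding.CodecV1
}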
// TryProposeBatch tries to propose a new batches.
func (p *BatchProposer) TryProposeBatch() {
p.batchProposerCircleTotal.Inc()
batch, err := p.proposeBatch()
if err != nil {
if err := p.proposeBatch(); err != nil {
p.proposeBatchFailureTotal.Inc()
log.Error("proposeBatchChunks failed", "err", err)
return
}
if batch == nil {
return
}
err = p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, dbTX)
}

func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion encoding.CodecVersion) error {
err := p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, codecVersion, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure",
"start chunk index", batch.StartChunkIndex, "end chunk index", batch.EndChunkIndex, "error", dbErr)
@@ -140,22 +156,23 @@ func (p *BatchProposer) TryProposeBatch() {
p.proposeBatchUpdateInfoFailureTotal.Inc()
log.Error("update batch info in db failed", "err", err)
}
return nil
}

func (p *BatchProposer) proposeBatch() (*encoding.Batch, error) {
func (p *BatchProposer) proposeBatch() error {
unbatchedChunkIndex, err := p.batchOrm.GetFirstUnbatchedChunkIndex(p.ctx)
if err != nil {
return nil, err
return err
}

// select at most p.maxChunkNumPerBatch chunks
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch))
if err != nil {
return nil, err
return err
}

if len(dbChunks) == 0 {
return nil, nil
return nil
}

maxChunksThisBatch := p.maxChunkNumPerBatch
@@ -170,124 +187,88 @@ func (p *BatchProposer) proposeBatch() (*encoding.Batch, error) {
}
}

daChunks, err := p.getDAChunks(dbChunks)
if err != nil {
return nil, err
codecVersion := encoding.CodecV0
if dbChunks[0].StartBlockNumber >= p.banachForkHeight {
codecVersion = encoding.CodecV1
}

parentDBBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
daChunks, err := p.getDAChunks(dbChunks)
if err != nil {
return nil, err
return err
}

dbParentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
if err != nil {
return err
}

var batch encoding.Batch
if parentDBBatch != nil {
batch.Index = parentDBBatch.Index + 1
parentDABatch, err := codecv0.NewDABatchFromBytes(parentDBBatch.BatchHeader)
if err != nil {
return nil, err
}
batch.TotalL1MessagePoppedBefore = parentDABatch.TotalL1MessagePopped
batch.ParentBatchHash = common.HexToHash(parentDBBatch.Hash)
batch.Index = dbParentBatch.Index + 1
batch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash)
parentBatchEndBlockNumber := daChunks[0].Blocks[0].Header.Number.Uint64() - 1
parentBatchCodecVersion := encoding.CodecV0
if dbParentBatch.Index > 0 && parentBatchEndBlockNumber >= p.banachForkHeight {
parentBatchCodecVersion = encoding.CodecV1
}
batch.TotalL1MessagePoppedBefore, err = utils.GetTotalL1MessagePoppedBeforeBatch(dbParentBatch.BatchHeader, parentBatchCodecVersion)
if err != nil {
return err
}

for i, chunk := range daChunks {
batch.Chunks = append(batch.Chunks, chunk)
totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
if err != nil {
return nil, err
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}
totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
if err != nil {
return nil, err
}
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
metrics.L1CommitBlobSize > maxBlobSize {
if i == 0 {
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
// The first chunk exceeds hard limits, which indicates a bug in the chunk-proposer, manual fix is needed.
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v",
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, p.maxChunkNumPerBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize)
}

log.Debug("breaking limit condition in batching",
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"currentL1CommitCalldataSize", metrics.L1CommitCalldataSize,
"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)

batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]

totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
metrics, err := utils.CalculateBatchMetrics(&batch, codecVersion)
if err != nil {
return nil, err
return fmt.Errorf("failed to calculate batch metrics: %w", err)
}

totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
if err != nil {
return nil, err
}

p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.batchChunksNum.Set(float64(batch.NumChunks()))
return &batch, nil
p.recordBatchMetrics(metrics)
return p.updateDBBatchInfo(&batch, codecVersion)
}
}

metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
}
currentTimeSec := uint64(time.Now().Unix())
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec ||
batch.NumChunks() == maxChunksThisBatch {
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
"start block number", dbChunks[0].StartBlockNumber,
"start block timestamp", dbChunks[0].StartBlockTime,
"current time", currentTimeSec,
)
} else {
log.Info("reached maximum number of chunks in batch",
"chunk count", batch.NumChunks(),
)
}

totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(&batch)
if err != nil {
return nil, err
}

totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(&batch)
if err != nil {
return nil, err
}
if metrics.FirstBlockTimestamp+p.batchTimeoutSec < currentTimeSec || metrics.NumChunks == maxChunksThisBatch {
log.Info("reached maximum number of chunks in batch or first block timeout",
"chunk count", metrics.NumChunks,
"start block number", dbChunks[0].StartBlockNumber,
"start block timestamp", dbChunks[0].StartBlockTime,
"current time", currentTimeSec)

p.batchFirstBlockTimeoutReached.Inc()
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.batchChunksNum.Set(float64(batch.NumChunks()))

return &batch, nil
p.recordBatchMetrics(metrics)
return p.updateDBBatchInfo(&batch, codecVersion)
}

log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
p.batchChunksProposeNotEnoughTotal.Inc()
return nil, nil
return nil
}

func (p *BatchProposer) getDAChunks(dbChunks []*orm.Chunk) ([]*encoding.Chunk, error) {
@@ -304,3 +285,10 @@ func (p *BatchProposer) getDAChunks(dbChunks []*orm.Chunk) ([]*encoding.Chunk, e
}
return chunks, nil
}

func (p *BatchProposer) recordBatchMetrics(metrics *utils.BatchMetrics) {
p.totalL1CommitGas.Set(float64(metrics.L1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
p.batchChunksNum.Set(float64(metrics.NumChunks))
p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
}
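After this refactor the batching limit check folds blob size into the same guard as calldata size and commit gas, with gas still scaled by the configured multiplier. In condensed form, using the names from the hunk above:

overEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
limitReached := metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
overEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
metrics.L1CommitBlobSize > maxBlobSize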
@@ -2,9 +2,12 @@ package watcher

import (
"context"
"math"
"math/big"
"testing"

"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"

@@ -16,7 +19,7 @@ import (
"scroll-tech/rollup/internal/orm"
)

func testBatchProposerLimits(t *testing.T) {
func testBatchProposerCodecv0Limits(t *testing.T) {
tests := []struct {
name string
maxChunkNum uint64
@@ -104,8 +107,31 @@ func testBatchProposerLimits(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -122,8 +148,7 @@ func testBatchProposerLimits(t *testing.T) {
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2

chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(6042), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize)
@@ -141,17 +166,142 @@ func testBatchProposerLimits(t *testing.T) {
}, db, nil)
bp.TryProposeBatch()

batchOrm := orm.NewBatch(db)
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, tt.expectedBatchesLen)
assert.Len(t, batches, tt.expectedBatchesLen+1)
batches = batches[1:]
if tt.expectedBatchesLen > 0 {
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
assert.Equal(t, tt.expectedChunksInFirstBatch-1, batches[0].EndChunkIndex)
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, tt.expectedChunksInFirstBatch-1)
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
assert.NoError(t, err)
assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
for _, chunk := range dbChunks {
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}
}
})
}
}

func testBatchProposerCodecv1Limits(t *testing.T) {
tests := []struct {
name string
maxChunkNum uint64
batchTimeoutSec uint64
forkBlock *big.Int
expectedBatchesLen int
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
}{
{
name: "NoLimitReached",
maxChunkNum: 10,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "Timeout",
maxChunkNum: 10,
batchTimeoutSec: 0,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 2,
},
{
name: "MaxChunkNumPerBatchIs1",
maxChunkNum: 1,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "ForkBlockReached",
maxChunkNum: 10,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
forkBlock: big.NewInt(3),
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV1)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV1)
assert.NoError(t, err)

l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BanachBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2

chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(0), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(0), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(0), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(0), chunks[1].TotalL1CommitCalldataSize)

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: tt.maxChunkNum,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
}, &params.ChainConfig{
BanachBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock,
}, db, nil)
bp.TryProposeBatch()

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, tt.expectedBatchesLen+1)
batches = batches[1:]
if tt.expectedBatchesLen > 0 {
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch)
assert.NoError(t, err)
assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch))
for _, chunk := range dbChunks {
@@ -167,8 +317,31 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
assert.NoError(t, err)

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -183,8 +356,7 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2

chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(6042), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize)
@@ -200,16 +372,16 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
}, &params.ChainConfig{}, db, nil)
bp.TryProposeBatch()

batchOrm := orm.NewBatch(db)
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 1)
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
assert.Equal(t, uint64(1), batches[0].EndChunkIndex)
assert.Len(t, batches, 2)
batches = batches[1:]
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1)
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Len(t, dbChunks, 2)
for _, chunk := range dbChunks {
@@ -220,3 +392,76 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
assert.Equal(t, uint64(258383), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(6035), batches[0].TotalL1CommitCalldataSize)
}

func testBatchProposerBlobSizeLimit(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV1)
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV1)
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: 1,
MaxL1CommitCalldataSizePerChunk: 1,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
}, &params.ChainConfig{BanachBlock: big.NewInt(0)}, db, nil)

block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for total := int64(0); total < 7; total++ {
for i := int64(0); i < 10; i++ {
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = big.NewInt(total*10 + i + 1)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}
cp.TryProposeChunk()
}

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: math.MaxUint64,
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 1,
BatchTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
}, &params.ChainConfig{BanachBlock: big.NewInt(0)}, db, nil)

for i := 0; i < 10; i++ {
bp.TryProposeBatch()
}

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
batches = batches[1:]
assert.NoError(t, err)
assert.Len(t, batches, 4)
for i, batch := range batches {
expected := uint64(2 * (i + 1))
if expected > 7 {
expected = 7
}
assert.Equal(t, expected, batch.EndChunkIndex)
}
}

@@ -3,6 +3,7 @@ package watcher
import (
"context"
"fmt"
"math"
"time"

"github.com/prometheus/client_golang/prometheus"
@@ -13,12 +14,14 @@ import (

"scroll-tech/common/forks"
"scroll-tech/common/types/encoding"
"scroll-tech/common/types/encoding/codecv0"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)

const maxBlobSize = uint64(131072)
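For context, 131072 bytes is the size of one EIP-4844 blob (4096 field elements of 32 bytes each), which is why maxBlobSize is fixed at that value.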
// ChunkProposer proposes chunks based on available unchunked blocks.
|
||||
type ChunkProposer struct {
|
||||
ctx context.Context
|
||||
@@ -35,6 +38,7 @@ type ChunkProposer struct {
|
||||
chunkTimeoutSec uint64
|
||||
gasCostIncreaseMultiplier float64
|
||||
forkHeights []uint64
|
||||
banachForkHeight uint64
|
||||
|
||||
chunkProposerCircleTotal prometheus.Counter
|
||||
proposeChunkFailureTotal prometheus.Counter
|
||||
@@ -43,6 +47,7 @@ type ChunkProposer struct {
|
||||
chunkTxNum prometheus.Gauge
|
||||
chunkEstimateL1CommitGas prometheus.Gauge
|
||||
totalL1CommitCalldataSize prometheus.Gauge
|
||||
totalL1CommitBlobSize prometheus.Gauge
|
||||
maxTxConsumption prometheus.Gauge
|
||||
chunkBlocksNum prometheus.Gauge
|
||||
chunkFirstBlockTimeoutReached prometheus.Counter
|
||||
@@ -61,7 +66,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
|
||||
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
|
||||
"forkHeights", forkHeights)
|
||||
|
||||
return &ChunkProposer{
|
||||
p := &ChunkProposer{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
@@ -103,6 +108,10 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
|
||||
Name: "rollup_propose_chunk_total_l1_commit_call_data_size",
|
||||
Help: "The total l1 commit call data size",
|
||||
}),
|
||||
totalL1CommitBlobSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_chunk_total_l1_commit_blob_size",
|
||||
Help: "The total l1 commit blob size",
|
||||
}),
|
||||
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_chunk_max_tx_consumption",
|
||||
Help: "The max tx consumption",
|
||||
@@ -120,32 +129,36 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
|
||||
Help: "Total number of chunk block propose not enough",
|
||||
}),
|
||||
}

// If BanachBlock is not set in chain's genesis config, banachForkHeight is inf,
// which means chunk-proposer uses codecv0 by default.
// TODO: Must change it to real fork name.
if chainCfg.BanachBlock != nil {
p.banachForkHeight = chainCfg.BanachBlock.Uint64()
} else {
p.banachForkHeight = math.MaxUint64
}
return p
}

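The fork gating above decides which codec the proposer uses for a given block. A compact way to read it (codecForBlock is an illustrative helper, not part of this change; the real check is inlined in proposeChunk below):

// Blocks at or above the Banach fork height are encoded with codecv1 (blob-based DA);
// earlier blocks stay on codecv0 (calldata-based DA). With BanachBlock unset,
// banachForkHeight is math.MaxUint64, so codecv0 is always chosen.
func codecForBlock(blockNumber, banachForkHeight uint64) encoding.CodecVersion {
if blockNumber >= banachForkHeight {
return encoding.CodecV1
}
return encoding.CodecV0
}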
// TryProposeChunk tries to propose a new chunk.
|
||||
func (p *ChunkProposer) TryProposeChunk() {
|
||||
p.chunkProposerCircleTotal.Inc()
|
||||
proposedChunk, err := p.proposeChunk()
|
||||
if err != nil {
|
||||
if err := p.proposeChunk(); err != nil {
|
||||
p.proposeChunkFailureTotal.Inc()
|
||||
log.Error("propose new chunk failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
|
||||
p.proposeChunkUpdateInfoFailureTotal.Inc()
|
||||
log.Error("update chunk info in orm failed", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ChunkProposer) updateChunkInfoInDB(chunk *encoding.Chunk) error {
|
||||
func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion encoding.CodecVersion) error {
|
||||
if chunk == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
p.proposeChunkUpdateInfoTotal.Inc()
|
||||
err := p.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
|
||||
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, codecVersion, dbTX)
|
||||
if err != nil {
|
||||
log.Warn("ChunkProposer.InsertChunk failed", "err", err)
|
||||
return err
|
||||
@@ -156,13 +169,18 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *encoding.Chunk) error {
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
if err != nil {
|
||||
p.proposeChunkUpdateInfoFailureTotal.Inc()
|
||||
log.Error("update chunk info in orm failed", "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ChunkProposer) proposeChunk() (*encoding.Chunk, error) {
|
||||
func (p *ChunkProposer) proposeChunk() error {
|
||||
unchunkedBlockHeight, err := p.chunkOrm.GetUnchunkedBlockHeight(p.ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
maxBlocksThisChunk := p.maxBlockNumPerChunk
|
||||
@@ -174,157 +192,91 @@ func (p *ChunkProposer) proposeChunk() (*encoding.Chunk, error) {
|
||||
// select at most maxBlocksThisChunk blocks
|
||||
blocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, unchunkedBlockHeight, int(maxBlocksThisChunk))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
if len(blocks) == 0 {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
codecVersion := encoding.CodecV0
|
||||
if blocks[0].Header.Number.Uint64() >= p.banachForkHeight {
|
||||
codecVersion = encoding.CodecV1
|
||||
}
|
||||
|
||||
var chunk encoding.Chunk
|
||||
for i, block := range blocks {
|
||||
chunk.Blocks = append(chunk.Blocks, block)
|
||||
|
||||
crcMax, err := chunk.CrcMax()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get crc max: %w", err)
|
||||
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
|
||||
if calcErr != nil {
|
||||
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
|
||||
}
|
||||
|
||||
totalTxNum := chunk.NumTransactions()
|
||||
|
||||
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(&chunk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
|
||||
}
|
||||
|
||||
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(&chunk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
|
||||
}
|
||||
|
||||
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
|
||||
|
||||
if totalTxNum > p.maxTxNumPerChunk ||
|
||||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
|
||||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
|
||||
crcMax > p.maxRowConsumptionPerChunk {
|
||||
// Check if the first block breaks hard limits.
|
||||
// If so, it indicates there are bugs in sequencer, manual fix is needed.
|
||||
overEstimatedL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
|
||||
if metrics.TxNum > p.maxTxNumPerChunk ||
|
||||
metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
|
||||
overEstimatedL1CommitGas > p.maxL1CommitGasPerChunk ||
|
||||
metrics.CrcMax > p.maxRowConsumptionPerChunk ||
|
||||
metrics.L1CommitBlobSize > maxBlobSize {
|
||||
if i == 0 {
|
||||
if totalTxNum > p.maxTxNumPerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
|
||||
block.Header.Number,
|
||||
totalTxNum,
|
||||
p.maxTxNumPerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
|
||||
block.Header.Number,
|
||||
totalL1CommitGas,
|
||||
p.maxL1CommitGasPerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
|
||||
block.Header.Number,
|
||||
totalL1CommitCalldataSize,
|
||||
p.maxL1CommitCalldataSizePerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
if crcMax > p.maxRowConsumptionPerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds row consumption limit; block number: %v, crc max: %v, limit: %v",
|
||||
block.Header.Number,
|
||||
crcMax,
|
||||
p.maxRowConsumptionPerChunk,
|
||||
)
|
||||
}
|
||||
// The first block exceeds hard limits, which indicates a bug in the sequencer, manual fix is needed.
|
||||
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxTxNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxRowConsumption: %v, maxBlobSize: %v",
|
||||
block.Header.Number, metrics, p.maxTxNumPerChunk, p.maxL1CommitCalldataSizePerChunk, p.maxL1CommitGasPerChunk, p.maxRowConsumptionPerChunk, maxBlobSize)
|
||||
}
|
||||
|
||||
log.Debug("breaking limit condition in chunking",
|
||||
"totalTxNum", totalTxNum,
|
||||
"maxTxNumPerChunk", p.maxTxNumPerChunk,
|
||||
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
|
||||
"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
|
||||
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
|
||||
"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
|
||||
"chunkRowConsumptionMax", crcMax,
|
||||
"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
|
||||
"txNum", metrics.TxNum,
|
||||
"maxTxNum", p.maxTxNumPerChunk,
|
||||
"l1CommitCalldataSize", metrics.L1CommitCalldataSize,
|
||||
"maxL1CommitCalldataSize", p.maxL1CommitCalldataSizePerChunk,
|
||||
"overEstimatedL1CommitGas", overEstimatedL1CommitGas,
|
||||
"maxL1CommitGas", p.maxL1CommitGasPerChunk,
|
||||
"rowConsumption", metrics.CrcMax,
|
||||
"maxRowConsumption", p.maxRowConsumptionPerChunk,
|
||||
"maxBlobSize", maxBlobSize)
|
||||
|
||||
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]
|
||||
|
||||
crcMax, err := chunk.CrcMax()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get crc max: %w", err)
|
||||
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
|
||||
if calcErr != nil {
|
||||
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
|
||||
}
|
||||
|
||||
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(&chunk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
|
||||
}
|
||||
|
||||
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(&chunk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
|
||||
}
|
||||
|
||||
p.chunkTxNum.Set(float64(chunk.NumTransactions()))
|
||||
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
|
||||
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
|
||||
p.maxTxConsumption.Set(float64(crcMax))
|
||||
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
|
||||
return &chunk, nil
|
||||
p.recordChunkMetrics(metrics)
|
||||
return p.updateDBChunkInfo(&chunk, codecVersion)
|
||||
}
|
||||
}
|
||||
|
||||
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
|
||||
if calcErr != nil {
|
||||
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
|
||||
}
|
||||
|
||||
currentTimeSec := uint64(time.Now().Unix())
|
||||
if chunk.Blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec ||
|
||||
uint64(len(chunk.Blocks)) == maxBlocksThisChunk {
|
||||
if chunk.Blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
|
||||
log.Warn("first block timeout",
|
||||
"block number", chunk.Blocks[0].Header.Number,
|
||||
"block timestamp", chunk.Blocks[0].Header.Time,
|
||||
"current time", currentTimeSec,
|
||||
)
|
||||
} else {
|
||||
log.Info("reached maximum number of blocks in chunk",
|
||||
"start block number", chunk.Blocks[0].Header.Number,
|
||||
"block count", len(chunk.Blocks),
|
||||
)
|
||||
}
|
||||
|
||||
crcMax, err := chunk.CrcMax()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get crc max: %w", err)
|
||||
}
|
||||
|
||||
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(&chunk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
|
||||
}
|
||||
|
||||
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(&chunk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
|
||||
}
|
||||
if metrics.FirstBlockTimestamp+p.chunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk {
|
||||
log.Info("reached maximum number of blocks in chunk or first block timeout",
|
||||
"start block number", chunk.Blocks[0].Header.Number,
|
||||
"block count", len(chunk.Blocks),
|
||||
"block number", chunk.Blocks[0].Header.Number,
|
||||
"block timestamp", metrics.FirstBlockTimestamp,
|
||||
"current time", currentTimeSec)
|
||||
|
||||
p.chunkFirstBlockTimeoutReached.Inc()
|
||||
p.chunkTxNum.Set(float64(chunk.NumTransactions()))
|
||||
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
|
||||
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
|
||||
p.maxTxConsumption.Set(float64(crcMax))
|
||||
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
|
||||
return &chunk, nil
|
||||
p.recordChunkMetrics(metrics)
|
||||
return p.updateDBChunkInfo(&chunk, codecVersion)
|
||||
}
|
||||
|
||||
log.Debug("pending blocks do not reach one of the constraints or contain a timeout block")
|
||||
p.chunkBlocksProposeNotEnoughTotal.Inc()
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *ChunkProposer) recordChunkMetrics(metrics *utils.ChunkMetrics) {
|
||||
p.chunkTxNum.Set(float64(metrics.TxNum))
|
||||
p.maxTxConsumption.Set(float64(metrics.CrcMax))
|
||||
p.chunkBlocksNum.Set(float64(metrics.NumBlocks))
|
||||
p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
|
||||
p.chunkEstimateL1CommitGas.Set(float64(metrics.L1CommitGas))
|
||||
p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common/math"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
@@ -15,7 +16,7 @@ import (
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
func testChunkProposerLimits(t *testing.T) {
|
||||
func testChunkProposerCodecv0Limits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxBlockNum uint64
|
||||
@@ -198,3 +199,164 @@ func testChunkProposerLimits(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testChunkProposerCodecv1Limits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maxBlockNum uint64
|
||||
maxTxNum uint64
|
||||
maxRowConsumption uint64
|
||||
chunkTimeoutSec uint64
|
||||
forkBlock *big.Int
|
||||
expectedChunksLen int
|
||||
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
|
||||
}{
|
||||
{
|
||||
name: "NoLimitReached",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "Timeout",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 0,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 2,
|
||||
},
|
||||
{
|
||||
name: "MaxTxNumPerChunkIs0",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 0,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxRowConsumptionPerChunkIs0",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxRowConsumption: 0,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 0,
|
||||
},
|
||||
{
|
||||
name: "MaxBlockNumPerChunkIs1",
|
||||
maxBlockNum: 1,
|
||||
maxTxNum: 10000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxTxNumPerChunkIsFirstBlock",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 2,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "MaxRowConsumptionPerChunkIs1",
|
||||
maxBlockNum: 10,
|
||||
maxTxNum: 10000,
|
||||
maxRowConsumption: 1,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
},
|
||||
{
|
||||
name: "ForkBlockReached",
|
||||
maxBlockNum: 100,
|
||||
maxTxNum: 10000,
|
||||
maxRowConsumption: 1000000,
|
||||
chunkTimeoutSec: 1000000000000,
|
||||
expectedChunksLen: 1,
|
||||
expectedBlocksInFirstChunk: 1,
|
||||
forkBlock: big.NewInt(2),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: tt.maxBlockNum,
|
||||
MaxTxNumPerChunk: tt.maxTxNum,
|
||||
MaxL1CommitGasPerChunk: 1,
|
||||
MaxL1CommitCalldataSizePerChunk: 1,
|
||||
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
|
||||
ChunkTimeoutSec: tt.chunkTimeoutSec,
|
||||
GasCostIncreaseMultiplier: 1.2,
|
||||
}, &params.ChainConfig{BanachBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock}, db, nil)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, tt.expectedChunksLen)
|
||||
|
||||
if len(chunks) > 0 {
|
||||
blockOrm := orm.NewL2Block(db)
|
||||
chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk)
|
||||
firstChunkHash := chunks[0].Hash
|
||||
for _, chunkHash := range chunkHashes {
|
||||
assert.Equal(t, firstChunkHash, chunkHash)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testChunkProposerCodecv1BlobSizeLimit(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
for i := int64(0); i < 2000; i++ {
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
block.Header.Number = big.NewInt(i + 1)
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: math.MaxUint64,
|
||||
MaxTxNumPerChunk: math.MaxUint64,
|
||||
MaxL1CommitGasPerChunk: 1,
|
||||
MaxL1CommitCalldataSizePerChunk: 1,
|
||||
MaxRowConsumptionPerChunk: math.MaxUint64,
|
||||
ChunkTimeoutSec: math.MaxUint64,
|
||||
GasCostIncreaseMultiplier: 1,
|
||||
}, &params.ChainConfig{BanachBlock: big.NewInt(0)}, db, nil)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
cp.TryProposeChunk()
|
||||
}
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 4)
|
||||
for i, chunk := range chunks {
|
||||
expected := uint64(551 * (i + 1))
|
||||
if expected > 2000 {
|
||||
expected = 2000
|
||||
}
|
||||
assert.Equal(t, expected, chunk.EndBlockNumber)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -59,7 +59,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
|
||||
}
|
||||
}
|
||||
|
||||
const blockTracesFetchLimit = uint64(10)
|
||||
const blocksFetchLimit = uint64(10)
|
||||
|
||||
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
|
||||
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
|
||||
@@ -71,8 +71,8 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
|
||||
}
|
||||
|
||||
// Fetch and store block traces for missing blocks
|
||||
for from := heightInDB + 1; from <= blockHeight; from += blockTracesFetchLimit {
|
||||
to := from + blockTracesFetchLimit - 1
|
||||
for from := heightInDB + 1; from <= blockHeight; from += blocksFetchLimit {
|
||||
to := from + blocksFetchLimit - 1
|
||||
|
||||
if to > blockHeight {
|
||||
to = blockHeight
|
||||
|
||||
@@ -57,25 +57,9 @@ func setupEnv(t *testing.T) (err error) {
|
||||
l2Cli, err = base.L2Client()
|
||||
assert.NoError(t, err)
|
||||
|
||||
templateBlockTrace1, err := os.ReadFile("../../../testdata/blockTrace_02.json")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// unmarshal blockTrace
|
||||
block1 = &encoding.Block{}
|
||||
if err = json.Unmarshal(templateBlockTrace1, block1); err != nil {
|
||||
return err
|
||||
}
|
||||
block1 = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
|
||||
block2 = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
|
||||
templateBlockTrace2, err := os.ReadFile("../../../testdata/blockTrace_03.json")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// unmarshal blockTrace
|
||||
block2 = &encoding.Block{}
|
||||
if err = json.Unmarshal(templateBlockTrace2, block2); err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -113,9 +97,22 @@ func TestFunction(t *testing.T) {
t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)

// Run chunk proposer test cases.
t.Run("TestChunkProposerLimits", testChunkProposerLimits)
t.Run("TestChunkProposerCodecv0Limits", testChunkProposerCodecv0Limits)
t.Run("TestChunkProposerCodecv1Limits", testChunkProposerCodecv1Limits)
t.Run("TestChunkProposerCodecv1BlobSizeLimit", testChunkProposerCodecv1BlobSizeLimit)

// Run batch proposer test cases.
t.Run("TestBatchProposerLimits", testBatchProposerLimits)
t.Run("TestBatchProposerCodecv0Limits", testBatchProposerCodecv0Limits)
t.Run("TestBatchProposerCodecv1Limits", testBatchProposerCodecv1Limits)
t.Run("TestBatchCommitGasAndCalldataSizeEstimation", testBatchCommitGasAndCalldataSizeEstimation)
t.Run("TestBatchProposerBlobSizeLimit", testBatchProposerBlobSizeLimit)
}

func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
data, err := os.ReadFile(filename)
assert.NoError(t, err)

block := &encoding.Block{}
assert.NoError(t, json.Unmarshal(data, block))
return block
}

@@ -7,14 +7,15 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/encoding"
|
||||
"scroll-tech/common/types/encoding/codecv0"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
rutils "scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
// Batch represents a batch of chunks.
|
||||
@@ -136,9 +137,6 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
|
||||
|
||||
var latestBatch Batch
|
||||
if err := db.First(&latestBatch).Error; err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
|
||||
}
|
||||
return &latestBatch, nil
|
||||
@@ -149,12 +147,7 @@ func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error)
|
||||
// Get the latest batch
|
||||
latestBatch, err := o.GetLatestBatch(ctx)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Chunk.GetChunkedBlockHeight error: %w", err)
|
||||
}
|
||||
// if parentBatch==nil then err==gorm.ErrRecordNotFound,
|
||||
// which means there is not batched chunk yet, thus returns 0
|
||||
if latestBatch == nil {
|
||||
return 0, nil
|
||||
return 0, fmt.Errorf("Batch.GetFirstUnbatchedChunkIndex error: %w", err)
|
||||
}
|
||||
return latestBatch.EndChunkIndex + 1, nil
|
||||
}
|
||||
@@ -226,7 +219,7 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
|
||||
}
|
||||
|
||||
// InsertBatch inserts a new batch into the database.
|
||||
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...*gorm.DB) (*Batch, error) {
|
||||
func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Batch, error) {
|
||||
if batch == nil {
|
||||
return nil, errors.New("invalid args: batch is nil")
|
||||
}
|
||||
@@ -236,94 +229,48 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
|
||||
return nil, errors.New("invalid args: batch contains 0 chunk")
|
||||
}
|
||||
|
||||
daBatch, err := codecv0.NewDABatch(batch)
|
||||
metrics, err := rutils.CalculateBatchMetrics(batch, codecVersion)
|
||||
if err != nil {
|
||||
log.Error("failed to create new DA batch",
|
||||
log.Error("failed to calculate batch metrics",
|
||||
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
totalL1CommitGas, err := codecv0.EstimateBatchL1CommitGas(batch)
|
||||
if err != nil {
|
||||
log.Error("failed to estimate batch L1 commit gas",
|
||||
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", len(batch.Chunks), "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
totalL1CommitCalldataSize, err := codecv0.EstimateBatchL1CommitCalldataSize(batch)
|
||||
if err != nil {
|
||||
log.Error("failed to estimate batch L1 commit calldata size",
|
||||
"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", len(batch.Chunks), "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
var startChunkIndex uint64
|
||||
parentBatch, err := o.GetLatestBatch(ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get latest batch", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
// if parentBatch==nil then err==gorm.ErrRecordNotFound, which means there's
|
||||
// no batch record in the db, we then use default empty values for the creating batch;
|
||||
// if parentBatch!=nil then err==nil, then we fill the parentBatch-related data into the creating batch
|
||||
if parentBatch != nil {
|
||||
if batch.Index > 0 {
|
||||
parentBatch, getErr := o.GetBatchByIndex(ctx, batch.Index-1)
|
||||
if getErr != nil {
|
||||
log.Error("failed to get batch by index", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", getErr)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", getErr)
|
||||
}
|
||||
startChunkIndex = parentBatch.EndChunkIndex + 1
|
||||
}
|
||||
|
||||
startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
|
||||
batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion)
|
||||
if err != nil {
|
||||
log.Error("failed to create start DA chunk", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
startDAChunkHash, err := startDAChunk.Hash()
|
||||
if err != nil {
|
||||
log.Error("failed to get start DA chunk hash", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
totalL1MessagePoppedBeforeEndDAChunk := batch.TotalL1MessagePoppedBefore
|
||||
for i := uint64(0); i < numChunks-1; i++ {
|
||||
totalL1MessagePoppedBeforeEndDAChunk += batch.Chunks[i].NumL1Messages(totalL1MessagePoppedBeforeEndDAChunk)
|
||||
}
|
||||
endDAChunk, err := codecv0.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
|
||||
if err != nil {
|
||||
log.Error("failed to create end DA chunk", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
endDAChunkHash, err := endDAChunk.Hash()
|
||||
if err != nil {
|
||||
log.Error("failed to get end DA chunk hash", "index", batch.Index, "total l1 message popped before", totalL1MessagePoppedBeforeEndDAChunk,
|
||||
log.Error("failed to get batch metadata", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
|
||||
"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
|
||||
return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
|
||||
}
|
||||
|
||||
newBatch := Batch{
|
||||
Index: batch.Index,
|
||||
Hash: daBatch.Hash().Hex(),
|
||||
StartChunkHash: startDAChunkHash.Hex(),
|
||||
Hash: batchMeta.BatchHash.Hex(),
|
||||
StartChunkHash: batchMeta.StartChunkHash.Hex(),
|
||||
StartChunkIndex: startChunkIndex,
|
||||
EndChunkHash: endDAChunkHash.Hex(),
|
||||
EndChunkHash: batchMeta.EndChunkHash.Hex(),
|
||||
EndChunkIndex: startChunkIndex + numChunks - 1,
|
||||
StateRoot: batch.StateRoot().Hex(),
|
||||
WithdrawRoot: batch.WithdrawRoot().Hex(),
|
||||
ParentBatchHash: batch.ParentBatchHash.Hex(),
|
||||
BatchHeader: daBatch.Encode(),
|
||||
BatchHeader: batchMeta.BatchBytes,
|
||||
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
|
||||
ProvingStatus: int16(types.ProvingTaskUnassigned),
|
||||
RollupStatus: int16(types.RollupPending),
|
||||
OracleStatus: int16(types.GasOraclePending),
|
||||
TotalL1CommitGas: totalL1CommitGas,
|
||||
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
|
||||
TotalL1CommitGas: metrics.L1CommitGas,
|
||||
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
|
||||
}
|
||||
|
||||
db := o.db
|
||||
|
||||
@@ -8,7 +8,8 @@ import (
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/encoding"
|
||||
"scroll-tech/common/types/encoding/codecv0"
|
||||
|
||||
"scroll-tech/rollup/internal/utils"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
@@ -110,7 +111,7 @@ func (o *Chunk) GetUnchunkedBlockHeight(ctx context.Context) (uint64, error) {
|
||||
// Get the latest chunk
|
||||
latestChunk, err := o.getLatestChunk(ctx)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Chunk.GetChunkedBlockHeight error: %w", err)
|
||||
return 0, fmt.Errorf("Chunk.GetUnchunkedBlockHeight error: %w", err)
|
||||
}
|
||||
if latestChunk == nil {
|
||||
// if there is no chunk, return block number 1,
|
||||
@@ -140,7 +141,7 @@ func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) (
|
||||
}
|
||||
|
||||
// InsertChunk inserts a new chunk into the database.
|
||||
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
|
||||
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Chunk, error) {
|
||||
if chunk == nil || len(chunk.Blocks) == 0 {
|
||||
return nil, errors.New("invalid args")
|
||||
}
|
||||
@@ -165,42 +166,30 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
|
||||
parentChunkStateRoot = parentChunk.StateRoot
|
||||
}
|
||||
|
||||
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
metrics, err := utils.CalculateChunkMetrics(chunk, codecVersion)
|
||||
if err != nil {
|
||||
log.Error("failed to initialize new DA chunk", "err", err)
|
||||
log.Error("failed to calculate chunk metrics", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
daChunkHash, err := daChunk.Hash()
|
||||
chunkHash, err := utils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecVersion)
|
||||
if err != nil {
|
||||
log.Error("failed to get DA chunk hash", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
|
||||
if err != nil {
|
||||
log.Error("failed to estimate chunk L1 commit calldata size", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(chunk)
|
||||
if err != nil {
|
||||
log.Error("failed to estimate chunk L1 commit gas", "err", err)
|
||||
log.Error("failed to get chunk hash", "err", err)
|
||||
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
|
||||
}
|
||||
|
||||
numBlocks := len(chunk.Blocks)
|
||||
newChunk := Chunk{
|
||||
Index: chunkIndex,
|
||||
Hash: daChunkHash.Hex(),
|
||||
Hash: chunkHash.Hex(),
|
||||
StartBlockNumber: chunk.Blocks[0].Header.Number.Uint64(),
|
||||
StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
|
||||
EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
|
||||
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
|
||||
TotalL2TxGas: chunk.L2GasUsed(),
|
||||
TotalL2TxNum: chunk.NumL2Transactions(),
|
||||
TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
|
||||
TotalL1CommitGas: totalL1CommitGas,
|
||||
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
|
||||
TotalL1CommitGas: metrics.L1CommitGas,
|
||||
StartBlockTime: chunk.Blocks[0].Header.Time,
|
||||
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
|
||||
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/encoding"
|
||||
"scroll-tech/common/types/encoding/codecv0"
|
||||
"scroll-tech/common/types/encoding/codecv1"
|
||||
"scroll-tech/database/migrate"
|
||||
)
|
||||
|
||||
@@ -29,12 +30,8 @@ var (
|
||||
batchOrm *Batch
|
||||
pendingTransactionOrm *PendingTransaction
|
||||
|
||||
block1 *encoding.Block
|
||||
block2 *encoding.Block
|
||||
chunk1 *encoding.Chunk
|
||||
chunk2 *encoding.Chunk
|
||||
chunkHash1 common.Hash
|
||||
chunkHash2 common.Hash
|
||||
block1 *encoding.Block
|
||||
block2 *encoding.Block
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
@@ -77,18 +74,6 @@ func setupEnv(t *testing.T) {
|
||||
block2 = &encoding.Block{}
|
||||
err = json.Unmarshal(templateBlockTrace, block2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
chunk1 = &encoding.Chunk{Blocks: []*encoding.Block{block1}}
|
||||
daChunk1, err := codecv0.NewDAChunk(chunk1, 0)
|
||||
assert.NoError(t, err)
|
||||
chunkHash1, err = daChunk1.Hash()
|
||||
assert.NoError(t, err)
|
||||
|
||||
chunk2 = &encoding.Chunk{Blocks: []*encoding.Block{block2}}
|
||||
daChunk2, err := codecv0.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
|
||||
assert.NoError(t, err)
|
||||
chunkHash2, err = daChunk2.Hash()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func tearDownEnv(t *testing.T) {
|
||||
@@ -180,147 +165,196 @@ func TestL2BlockOrm(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestChunkOrm(t *testing.T) {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
|
||||
chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
|
||||
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
|
||||
for _, codecVersion := range codecVersions {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
var chunkHash1 common.Hash
|
||||
var chunkHash2 common.Hash
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
daChunk1, createErr := codecv0.NewDAChunk(chunk1, 0)
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash1, err = daChunk1.Hash()
|
||||
assert.NoError(t, err)
|
||||
|
||||
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
|
||||
daChunk2, createErr := codecv0.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash2, err = daChunk2.Hash()
|
||||
assert.NoError(t, err)
|
||||
} else {
|
||||
daChunk1, createErr := codecv1.NewDAChunk(chunk1, 0)
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash1, err = daChunk1.Hash()
|
||||
assert.NoError(t, err)
|
||||
|
||||
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
|
||||
daChunk2, createErr := codecv1.NewDAChunk(chunk2, chunk1.NumL1Messages(0))
|
||||
assert.NoError(t, createErr)
|
||||
chunkHash2, err = daChunk2.Hash()
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 2)
|
||||
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
|
||||
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
|
||||
assert.Equal(t, "", chunks[0].BatchHash)
|
||||
assert.Equal(t, "", chunks[1].BatchHash)
|
||||
dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex())
|
||||
|
||||
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash2.Hex(), types.ProvingTaskAssigned)
|
||||
assert.NoError(t, err)
|
||||
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
|
||||
|
||||
chunks, err = chunkOrm.GetChunksInRange(context.Background(), 0, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 2)
|
||||
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
|
||||
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
|
||||
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(chunks[0].ProvingStatus))
|
||||
assert.Equal(t, types.ProvingTaskAssigned, types.ProvingStatus(chunks[1].ProvingStatus))
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 2)
|
||||
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
|
||||
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
|
||||
assert.Equal(t, "", chunks[0].BatchHash)
|
||||
assert.Equal(t, "", chunks[1].BatchHash)
|
||||
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
|
||||
assert.NoError(t, err)
|
||||
chunks, err = chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 2)
|
||||
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
|
||||
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
|
||||
assert.Equal(t, "test hash", chunks[0].BatchHash)
|
||||
assert.Equal(t, "", chunks[1].BatchHash)
|
||||
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash2.Hex(), types.ProvingTaskAssigned)
|
||||
assert.NoError(t, err)
|
||||
|
||||
chunks, err = chunkOrm.GetChunksInRange(context.Background(), 0, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 2)
|
||||
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
|
||||
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
|
||||
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(chunks[0].ProvingStatus))
|
||||
assert.Equal(t, types.ProvingTaskAssigned, types.ProvingStatus(chunks[1].ProvingStatus))
|
||||
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
|
||||
assert.NoError(t, err)
|
||||
chunks, err = chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 2)
|
||||
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
|
||||
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
|
||||
assert.Equal(t, "test hash", chunks[0].BatchHash)
|
||||
assert.Equal(t, "", chunks[1].BatchHash)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchOrm(t *testing.T) {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1}
|
||||
chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}}
|
||||
chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}}
|
||||
for _, codecVersion := range codecVersions {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1},
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1},
|
||||
}
|
||||
batch1, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
hash1 := batch1.Hash
|
||||
|
||||
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var batchHash1 string
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
daBatch1, createErr := codecv0.NewDABatchFromBytes(batch1.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash1 = daBatch1.Hash().Hex()
|
||||
} else {
|
||||
daBatch1, createErr := codecv1.NewDABatchFromBytes(batch1.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash1 = daBatch1.Hash().Hex()
|
||||
}
|
||||
assert.Equal(t, hash1, batchHash1)
|
||||
|
||||
batch = &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk2},
|
||||
}
|
||||
batch2, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
hash2 := batch2.Hash
|
||||
|
||||
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var batchHash2 string
|
||||
if codecVersion == encoding.CodecV0 {
|
||||
daBatch2, createErr := codecv0.NewDABatchFromBytes(batch2.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash2 = daBatch2.Hash().Hex()
|
||||
} else {
|
||||
daBatch2, createErr := codecv1.NewDABatchFromBytes(batch2.BatchHeader)
|
||||
assert.NoError(t, createErr)
|
||||
batchHash2 = daBatch2.Hash().Hex()
|
||||
}
|
||||
assert.Equal(t, hash2, batchHash2)
|
||||
|
||||
count, err := batchOrm.GetBatchCount(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(2), count)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitFailed)
|
||||
assert.NoError(t, err)
|
||||
|
||||
pendingBatches, err := batchOrm.GetFailedAndPendingBatches(context.Background(), 100)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2, len(pendingBatches))
|
||||
|
||||
rollupStatus, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash1, batchHash2})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2, len(rollupStatus))
|
||||
assert.Equal(t, types.RollupCommitFailed, rollupStatus[0])
|
||||
assert.Equal(t, types.RollupPending, rollupStatus[1])
|
||||
|
||||
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, dbProof)
|
||||
|
||||
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized)
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash")
|
||||
assert.NoError(t, err)
|
||||
|
||||
updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, updatedBatch)
|
||||
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
|
||||
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
|
||||
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
|
||||
assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash)
|
||||
|
||||
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash2, "commitTxHash", types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, updatedBatch)
|
||||
assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
|
||||
assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus))
|
||||
|
||||
err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
|
||||
assert.NoError(t, err)
|
||||
|
||||
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, updatedBatch)
|
||||
assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
|
||||
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))
|
||||
}
|
||||
batch1, err := batchOrm.InsertBatch(context.Background(), batch)
|
||||
assert.NoError(t, err)
|
||||
hash1 := batch1.Hash
|
||||
|
||||
batch1, err = batchOrm.GetBatchByIndex(context.Background(), 0)
|
||||
assert.NoError(t, err)
|
||||
daBatch1, err := codecv0.NewDABatchFromBytes(batch1.BatchHeader)
|
||||
assert.NoError(t, err)
|
||||
batchHash1 := daBatch1.Hash().Hex()
|
||||
assert.Equal(t, hash1, batchHash1)
|
||||
|
||||
batch = &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1},
|
||||
}
|
||||
batch2, err := batchOrm.InsertBatch(context.Background(), batch)
|
||||
assert.NoError(t, err)
|
||||
hash2 := batch2.Hash
|
||||
|
||||
batch2, err = batchOrm.GetBatchByIndex(context.Background(), 1)
|
||||
assert.NoError(t, err)
|
||||
daBatch2, err := codecv0.NewDABatchFromBytes(batch2.BatchHeader)
|
||||
assert.NoError(t, err)
|
||||
batchHash2 := daBatch2.Hash().Hex()
|
||||
assert.Equal(t, hash2, batchHash2)
|
||||
|
||||
count, err := batchOrm.GetBatchCount(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(2), count)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash1, types.RollupCommitFailed)
|
||||
assert.NoError(t, err)
|
||||
|
||||
pendingBatches, err := batchOrm.GetFailedAndPendingBatches(context.Background(), 100)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2, len(pendingBatches))
|
||||
|
||||
rollupStatus, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash1, batchHash2})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 2, len(rollupStatus))
|
||||
assert.Equal(t, types.RollupCommitFailed, rollupStatus[0])
|
||||
assert.Equal(t, types.RollupPending, rollupStatus[1])
|
||||
|
||||
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
dbProof, err := batchOrm.GetVerifiedProofByHash(context.Background(), batchHash1)
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, dbProof)
|
||||
|
||||
err = batchOrm.UpdateProvingStatus(context.Background(), batchHash2, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized)
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash")
|
||||
assert.NoError(t, err)
|
||||
|
||||
updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, updatedBatch)
|
||||
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
|
||||
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
|
||||
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
|
||||
assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash)
|
||||
|
||||
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash2, "commitTxHash", types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, updatedBatch)
|
||||
assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
|
||||
assert.Equal(t, types.RollupCommitted, types.RollupStatus(updatedBatch.RollupStatus))
|
||||
|
||||
err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
|
||||
assert.NoError(t, err)
|
||||
|
||||
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, updatedBatch)
|
||||
assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash)
|
||||
assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus))
|
||||
}
|
||||
|
||||
func TestTransactionOrm(t *testing.T) {
|
||||
func TestPendingTransactionOrm(t *testing.T) {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
@@ -9,6 +9,10 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
|
||||
"scroll-tech/common/types/encoding"
|
||||
"scroll-tech/common/types/encoding/codecv0"
|
||||
"scroll-tech/common/types/encoding/codecv1"
|
||||
|
||||
bridgeAbi "scroll-tech/rollup/abi"
|
||||
)
|
||||
|
||||
@@ -63,3 +67,228 @@ func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error {
|
||||
}
|
||||
return abi.ParseTopics(out, indexed, log.Topics[1:])
|
||||
}
|
||||
|
||||
// ChunkMetrics indicates the metrics for proposing a chunk.
|
||||
type ChunkMetrics struct {
|
||||
// common metrics
|
||||
NumBlocks uint64
|
||||
TxNum uint64
|
||||
CrcMax uint64
|
||||
FirstBlockTimestamp uint64
|
||||
|
||||
// codecv0 metrics, default 0 for codecv1
|
||||
L1CommitCalldataSize uint64
|
||||
L1CommitGas uint64
|
||||
|
||||
// codecv1 metrics, default 0 for codecv0
|
||||
L1CommitBlobSize uint64
|
||||
}
|
||||
|
||||
// CalculateChunkMetrics calculates chunk metrics.
|
||||
func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVersion) (*ChunkMetrics, error) {
|
||||
var err error
|
||||
metrics := &ChunkMetrics{
|
||||
TxNum: chunk.NumTransactions(),
|
||||
NumBlocks: uint64(len(chunk.Blocks)),
|
||||
FirstBlockTimestamp: chunk.Blocks[0].Header.Time,
|
||||
}
|
||||
metrics.CrcMax, err = chunk.CrcMax()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get crc max: %w", err)
|
||||
}
|
||||
switch codecVersion {
|
||||
case encoding.CodecV0:
|
||||
metrics.L1CommitCalldataSize, err = codecv0.EstimateChunkL1CommitCalldataSize(chunk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size: %w", err)
|
||||
}
|
||||
metrics.L1CommitGas, err = codecv0.EstimateChunkL1CommitGas(chunk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
case encoding.CodecV1:
|
||||
metrics.L1CommitBlobSize, err = codecv1.EstimateChunkL1CommitBlobSize(chunk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit blob size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
|
||||
}
|
||||
}
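A minimal usage sketch for CalculateChunkMetrics (illustrative only; it assumes the chunk's blocks carry row-consumption data so CrcMax succeeds, and it borrows the go-ethereum log package for output):

// logChunkMetrics is a hypothetical caller showing which fields each codec populates:
// codecv0 fills L1CommitCalldataSize and L1CommitGas, codecv1 fills L1CommitBlobSize.
func logChunkMetrics(chunk *encoding.Chunk) error {
for _, version := range []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1} {
m, err := CalculateChunkMetrics(chunk, version)
if err != nil {
return err
}
log.Info("chunk metrics", "codec", version, "txNum", m.TxNum, "crcMax", m.CrcMax,
"calldataSize", m.L1CommitCalldataSize, "commitGas", m.L1CommitGas, "blobSize", m.L1CommitBlobSize)
}
return nil
}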
|
||||
|
||||
// BatchMetrics indicates the metrics for proposing a batch.
|
||||
type BatchMetrics struct {
|
||||
// common metrics
|
||||
NumChunks uint64
|
||||
FirstBlockTimestamp uint64
|
||||
|
||||
// codecv0 metrics, default 0 for codecv1
|
||||
L1CommitCalldataSize uint64
|
||||
L1CommitGas uint64
|
||||
|
||||
// codecv1 metrics, default 0 for codecv0
|
||||
L1CommitBlobSize uint64
|
||||
}
|
||||
|
||||
// CalculateBatchMetrics calculates batch metrics.
|
||||
func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetrics, error) {
|
||||
var err error
|
||||
metrics := &BatchMetrics{
|
||||
NumChunks: uint64(len(batch.Chunks)),
|
||||
FirstBlockTimestamp: batch.Chunks[0].Blocks[0].Header.Time,
|
||||
}
|
||||
switch codecVersion {
|
||||
case encoding.CodecV0:
|
||||
metrics.L1CommitGas, err = codecv0.EstimateBatchL1CommitGas(batch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate batch L1 commit gas: %w", err)
|
||||
}
|
||||
metrics.L1CommitCalldataSize, err = codecv0.EstimateBatchL1CommitCalldataSize(batch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate batch L1 commit calldata size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
case encoding.CodecV1:
|
||||
metrics.L1CommitBlobSize, err = codecv1.EstimateBatchL1CommitBlobSize(batch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate chunk L1 commit blob size: %w", err)
|
||||
}
|
||||
return metrics, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
|
||||
}
|
||||
}
|
||||
|
||||
// GetChunkHash retrieves the hash of a chunk.
|
||||
func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, codecVersion encoding.CodecVersion) (common.Hash, error) {
|
||||
switch codecVersion {
|
||||
case encoding.CodecV0:
|
||||
daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to create codecv0 DA chunk: %w", err)
|
||||
}
|
||||
chunkHash, err := daChunk.Hash()
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to get codecv0 DA chunk hash: %w", err)
|
||||
}
|
||||
return chunkHash, nil
|
||||
case encoding.CodecV1:
|
||||
daChunk, err := codecv1.NewDAChunk(chunk, totalL1MessagePoppedBefore)
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to create codecv1 DA chunk: %w", err)
|
||||
}
|
||||
chunkHash, err := daChunk.Hash()
|
||||
if err != nil {
|
||||
return common.Hash{}, fmt.Errorf("failed to get codecv1 DA chunk hash: %w", err)
|
||||
}
|
||||
return chunkHash, nil
|
||||
default:
|
||||
return common.Hash{}, fmt.Errorf("unsupported codec version: %v", codecVersion)
|
||||
}
|
||||
}

// BatchMetadata represents the metadata of a batch.
type BatchMetadata struct {
    BatchHash      common.Hash
    BatchBytes     []byte
    StartChunkHash common.Hash
    EndChunkHash   common.Hash
}

// GetBatchMetadata retrieves the metadata of a batch.
func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetadata, error) {
    numChunks := len(batch.Chunks)
    totalL1MessagePoppedBeforeEndDAChunk := batch.TotalL1MessagePoppedBefore
    for i := 0; i < numChunks-1; i++ {
        totalL1MessagePoppedBeforeEndDAChunk += batch.Chunks[i].NumL1Messages(totalL1MessagePoppedBeforeEndDAChunk)
    }

    switch codecVersion {
    case encoding.CodecV0:
        daBatch, err := codecv0.NewDABatch(batch)
        if err != nil {
            return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
        }

        batchMeta := &BatchMetadata{
            BatchHash:  daBatch.Hash(),
            BatchBytes: daBatch.Encode(),
        }

        startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
        if err != nil {
            return nil, fmt.Errorf("failed to create codecv0 start DA chunk: %w", err)
        }

        batchMeta.StartChunkHash, err = startDAChunk.Hash()
        if err != nil {
            return nil, fmt.Errorf("failed to get codecv0 start DA chunk hash: %w", err)
        }

        endDAChunk, err := codecv0.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
        if err != nil {
            return nil, fmt.Errorf("failed to create codecv0 end DA chunk: %w", err)
        }

        batchMeta.EndChunkHash, err = endDAChunk.Hash()
        if err != nil {
            return nil, fmt.Errorf("failed to get codecv0 end DA chunk hash: %w", err)
        }
        return batchMeta, nil
    case encoding.CodecV1:
        daBatch, err := codecv1.NewDABatch(batch)
        if err != nil {
            return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
        }

        batchMeta := &BatchMetadata{
            BatchHash:  daBatch.Hash(),
            BatchBytes: daBatch.Encode(),
        }

        startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
        if err != nil {
            return nil, fmt.Errorf("failed to create codecv1 start DA chunk: %w", err)
        }

        batchMeta.StartChunkHash, err = startDAChunk.Hash()
        if err != nil {
            return nil, fmt.Errorf("failed to get codecv1 start DA chunk hash: %w", err)
        }

        endDAChunk, err := codecv1.NewDAChunk(batch.Chunks[numChunks-1], totalL1MessagePoppedBeforeEndDAChunk)
        if err != nil {
            return nil, fmt.Errorf("failed to create codecv1 end DA chunk: %w", err)
        }

        batchMeta.EndChunkHash, err = endDAChunk.Hash()
        if err != nil {
            return nil, fmt.Errorf("failed to get codecv1 end DA chunk hash: %w", err)
        }
        return batchMeta, nil
    default:
        return nil, fmt.Errorf("unsupported codec version: %v", codecVersion)
    }
}

// GetTotalL1MessagePoppedBeforeBatch retrieves the total L1 messages popped before the batch.
func GetTotalL1MessagePoppedBeforeBatch(parentBatchBytes []byte, codecVersion encoding.CodecVersion) (uint64, error) {
    switch codecVersion {
    case encoding.CodecV0:
        parentDABatch, err := codecv0.NewDABatchFromBytes(parentBatchBytes)
        if err != nil {
            return 0, fmt.Errorf("failed to create parent DA batch from bytes using codecv0, err: %w", err)
        }
        return parentDABatch.TotalL1MessagePopped, nil
    case encoding.CodecV1:
        parentDABatch, err := codecv1.NewDABatchFromBytes(parentBatchBytes)
        if err != nil {
            return 0, fmt.Errorf("failed to create parent DA batch from bytes using codecv1, err: %w", err)
        }
        return parentDABatch.TotalL1MessagePopped, nil
    default:
        return 0, fmt.Errorf("unsupported codec version: %v", codecVersion)
    }
}
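A hedged sketch of how the helpers above might be chained when building a batch on top of its parent: the parent batch header gives the number of L1 messages popped so far, which then feeds the chunk hash and metadata computations. The `buildBatchMetadata` wrapper and its consistency check are illustrative, not code from this repository, and it assumes the same package and imports as the functions above.

```go
// buildBatchMetadata is an illustrative helper that derives the L1 message
// offset from the parent batch header bytes, hashes the first chunk with the
// same codec version, and cross-checks it against the batch metadata.
func buildBatchMetadata(parentBatchBytes []byte, batch *encoding.Batch, codecVersion encoding.CodecVersion) (*BatchMetadata, error) {
    totalPopped, err := GetTotalL1MessagePoppedBeforeBatch(parentBatchBytes, codecVersion)
    if err != nil {
        return nil, fmt.Errorf("failed to get total L1 messages popped before batch: %w", err)
    }
    batch.TotalL1MessagePoppedBefore = totalPopped

    // Hash of the first chunk, computed with the same codec version.
    firstChunkHash, err := GetChunkHash(batch.Chunks[0], totalPopped, codecVersion)
    if err != nil {
        return nil, fmt.Errorf("failed to get first chunk hash: %w", err)
    }

    meta, err := GetBatchMetadata(batch, codecVersion)
    if err != nil {
        return nil, fmt.Errorf("failed to get batch metadata: %w", err)
    }
    if meta.StartChunkHash != firstChunkHash {
        return nil, fmt.Errorf("start chunk hash mismatch: %s vs %s", meta.StartChunkHash.Hex(), firstChunkHash.Hex())
    }
    return meta, nil
}
```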
@@ -1,346 +1,607 @@
|
||||
// SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity =0.8.24;
|
||||
|
||||
import {BatchHeaderV0Codec} from "../../contracts/src/libraries/codec/BatchHeaderV0Codec.sol";
|
||||
import {ChunkCodecV0} from "../../contracts/src/libraries/codec/ChunkCodecV0.sol";
|
||||
import {IL1MessageQueue} from "../../contracts/src/L1/rollup/IL1MessageQueue.sol";
|
||||
import {BatchHeaderV0Codec} from "../../../contracts/src/libraries/codec/BatchHeaderV0Codec.sol";
|
||||
import {BatchHeaderV1Codec} from "../../../contracts/src/libraries/codec/BatchHeaderV1Codec.sol";
|
||||
import {ChunkCodecV0} from "../../../contracts/src/libraries/codec/ChunkCodecV0.sol";
|
||||
import {ChunkCodecV1} from "../../../contracts/src/libraries/codec/ChunkCodecV1.sol";
|
||||
|
||||
contract MockBridge {
|
||||
/******************************
|
||||
* Events from L1MessageQueue *
|
||||
******************************/
|
||||
/// @dev Thrown when committing a committed batch.
|
||||
error ErrorBatchIsAlreadyCommitted();
|
||||
|
||||
/// @notice Emitted when a new L1 => L2 transaction is appended to the queue.
|
||||
/// @param sender The address of the account that initiates the transaction.
|
||||
/// @param target The address of the account that will receive the transaction.
|
||||
/// @param value The value passed with the transaction.
|
||||
/// @param queueIndex The index of this transaction in the queue.
|
||||
/// @param gasLimit Gas limit required to complete the message relay on L2.
|
||||
/// @param data The calldata of the transaction.
|
||||
event QueueTransaction(
|
||||
address indexed sender,
|
||||
address indexed target,
|
||||
uint256 value,
|
||||
uint64 queueIndex,
|
||||
uint256 gasLimit,
|
||||
bytes data
|
||||
);
|
||||
/// @dev Thrown when finalizing a verified batch.
|
||||
error ErrorBatchIsAlreadyVerified();
|
||||
|
||||
/*********************************
|
||||
* Events from L1ScrollMessenger *
|
||||
*********************************/
|
||||
/// @dev Thrown when committing empty batch (batch without chunks)
|
||||
error ErrorBatchIsEmpty();
|
||||
|
||||
/// @notice Emitted when a cross domain message is sent.
|
||||
/// @param sender The address of the sender who initiates the message.
|
||||
/// @param target The address of target contract to call.
|
||||
/// @param value The amount of value passed to the target contract.
|
||||
/// @param messageNonce The nonce of the message.
|
||||
/// @param gasLimit The optional gas limit passed to L1 or L2.
|
||||
/// @param message The calldata passed to the target contract.
|
||||
event SentMessage(
|
||||
address indexed sender,
|
||||
address indexed target,
|
||||
uint256 value,
|
||||
uint256 messageNonce,
|
||||
uint256 gasLimit,
|
||||
bytes message
|
||||
);
|
||||
/// @dev Thrown when the call to the point evaluation precompile fails.
|
||||
error ErrorCallPointEvaluationPrecompileFailed();
|
||||
|
||||
/// @notice Emitted when a cross domain message is relayed successfully.
|
||||
/// @param messageHash The hash of the message.
|
||||
event RelayedMessage(bytes32 indexed messageHash);
|
||||
/// @dev Thrown when the transaction has multiple blobs.
|
||||
error ErrorFoundMultipleBlob();
|
||||
|
||||
/***************************
|
||||
* Events from ScrollChain *
|
||||
***************************/
|
||||
/// @dev Thrown when some fields are not zero in genesis batch.
|
||||
error ErrorGenesisBatchHasNonZeroField();
|
||||
|
||||
/// @notice Emitted when a new batch is committed.
|
||||
/// @param batchHash The hash of the batch.
|
||||
event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);
|
||||
/// @dev Thrown when importing genesis batch twice.
|
||||
error ErrorGenesisBatchImported();
|
||||
|
||||
/// @notice Emitted when a batch is finalized.
|
||||
/// @param batchHash The hash of the batch
|
||||
/// @param stateRoot The state root on layer 2 after this batch.
|
||||
/// @param withdrawRoot The merkle root on layer2 after this batch.
|
||||
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
|
||||
/// @dev Thrown when data hash in genesis batch is zero.
|
||||
error ErrorGenesisDataHashIsZero();
|
||||
|
||||
/***********
|
||||
* Structs *
|
||||
***********/
|
||||
/// @dev Thrown when the parent batch hash in genesis batch is zero.
|
||||
error ErrorGenesisParentBatchHashIsNonZero();
|
||||
|
||||
struct L2MessageProof {
|
||||
// The index of the batch where the message belongs to.
|
||||
uint256 batchIndex;
|
||||
// Concatenation of merkle proof for withdraw merkle trie.
|
||||
bytes merkleProof;
|
||||
}
|
||||
/// @dev Thrown when the l2 transaction is incomplete.
|
||||
error ErrorIncompleteL2TransactionData();
|
||||
|
||||
/*************
|
||||
* Variables *
|
||||
*************/
|
||||
/// @dev Thrown when the batch hash is incorrect.
|
||||
error ErrorIncorrectBatchHash();
|
||||
|
||||
uint256 public messageNonce;
|
||||
mapping(uint256 => bytes32) public committedBatches;
|
||||
uint256 public l2BaseFee;
|
||||
/// @dev Thrown when the batch index is incorrect.
|
||||
error ErrorIncorrectBatchIndex();
|
||||
|
||||
/***********************************
|
||||
* Functions from L2GasPriceOracle *
|
||||
***********************************/
|
||||
/// @dev Thrown when the previous state root doesn't match stored one.
|
||||
error ErrorIncorrectPreviousStateRoot();
|
||||
|
||||
function setL2BaseFee(uint256 _newL2BaseFee) external {
|
||||
l2BaseFee = _newL2BaseFee;
|
||||
}
|
||||
/// @dev Thrown when the batch header version is invalid.
|
||||
error ErrorInvalidBatchHeaderVersion();
|
||||
|
||||
/************************************
|
||||
* Functions from L1ScrollMessenger *
|
||||
************************************/
|
||||
/// @dev Thrown when no blob found in the transaction.
|
||||
error ErrorNoBlobFound();
|
||||
|
||||
function sendMessage(
|
||||
address target,
|
||||
uint256 value,
|
||||
bytes calldata message,
|
||||
uint256 gasLimit
|
||||
) external payable {
|
||||
bytes memory _xDomainCalldata = _encodeXDomainCalldata(msg.sender, target, value, messageNonce, message);
|
||||
{
|
||||
address _sender = applyL1ToL2Alias(address(this));
|
||||
emit QueueTransaction(_sender, target, 0, uint64(messageNonce), gasLimit, _xDomainCalldata);
|
||||
/// @dev Thrown when the number of transactions is less than number of L1 message in one block.
|
||||
error ErrorNumTxsLessThanNumL1Msgs();
|
||||
|
||||
/// @dev Thrown when the given previous state is zero.
|
||||
error ErrorPreviousStateRootIsZero();
|
||||
|
||||
/// @dev Thrown when the given state root is zero.
|
||||
error ErrorStateRootIsZero();
|
||||
|
||||
/// @dev Thrown when a chunk contains too many transactions.
|
||||
error ErrorTooManyTxsInOneChunk();
|
||||
|
||||
/// @dev Thrown when the precompile output is incorrect.
|
||||
error ErrorUnexpectedPointEvaluationPrecompileOutput();
|
||||
|
||||
event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);
|
||||
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
|
||||
|
||||
struct L2MessageProof {
|
||||
uint256 batchIndex;
|
||||
bytes merkleProof;
|
||||
}
|
||||
|
||||
emit SentMessage(msg.sender, target, value, messageNonce, gasLimit, message);
|
||||
messageNonce += 1;
|
||||
}
|
||||
/// @dev Address of the point evaluation precompile used for EIP-4844 blob verification.
|
||||
address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A);
|
||||
|
||||
function relayMessageWithProof(
|
||||
address _from,
|
||||
address _to,
|
||||
uint256 _value,
|
||||
uint256 _nonce,
|
||||
bytes memory _message,
|
||||
L2MessageProof memory
|
||||
) external {
|
||||
bytes memory _xDomainCalldata = _encodeXDomainCalldata(_from, _to, _value, _nonce, _message);
|
||||
bytes32 _xDomainCalldataHash = keccak256(_xDomainCalldata);
|
||||
emit RelayedMessage(_xDomainCalldataHash);
|
||||
}
|
||||
/// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
|
||||
/// point evaluation precompile
|
||||
uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
|
||||
|
||||
/******************************
|
||||
* Functions from ScrollChain *
|
||||
******************************/
|
||||
uint256 public l2BaseFee;
|
||||
uint256 public lastFinalizedBatchIndex;
|
||||
mapping(uint256 => bytes32) public committedBatches;
|
||||
mapping(uint256 => bytes32) public finalizedStateRoots;
|
||||
mapping(uint256 => bytes32) public withdrawRoots;
|
||||
|
||||
/// @notice Import layer 2 genesis block
|
||||
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
|
||||
}
|
||||
|
||||
function commitBatch(
|
||||
uint8 /*version*/,
|
||||
bytes calldata _parentBatchHeader,
|
||||
bytes[] memory chunks,
|
||||
bytes calldata /*skippedL1MessageBitmap*/
|
||||
) external {
|
||||
// check whether the batch is empty
|
||||
uint256 _chunksLength = chunks.length;
|
||||
require(_chunksLength > 0, "batch is empty");
|
||||
|
||||
// decode batch index
|
||||
uint256 headerLength = _parentBatchHeader.length;
|
||||
uint256 parentBatchPtr;
|
||||
uint256 parentBatchIndex;
|
||||
assembly {
|
||||
parentBatchPtr := mload(0x40)
|
||||
calldatacopy(parentBatchPtr, _parentBatchHeader.offset, headerLength)
|
||||
mstore(0x40, add(parentBatchPtr, headerLength))
|
||||
parentBatchIndex := shr(192, mload(add(parentBatchPtr, 1)))
|
||||
function setL2BaseFee(uint256 _newL2BaseFee) external {
|
||||
l2BaseFee = _newL2BaseFee;
|
||||
}
|
||||
|
||||
uint256 dataPtr;
|
||||
assembly {
|
||||
dataPtr := mload(0x40)
|
||||
mstore(0x40, add(dataPtr, mul(_chunksLength, 32)))
|
||||
}
|
||||
/*****************************
|
||||
* Public Mutating Functions *
|
||||
*****************************/
|
||||
|
||||
for (uint256 i = 0; i < _chunksLength; i++) {
|
||||
_commitChunk(dataPtr, chunks[i]);
|
||||
/// @notice Import layer 2 genesis block
|
||||
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external {
|
||||
// check genesis batch header length
|
||||
if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero();
|
||||
|
||||
unchecked {
|
||||
dataPtr += 32;
|
||||
}
|
||||
}
|
||||
// check whether the genesis batch is imported
|
||||
if (finalizedStateRoots[0] != bytes32(0)) revert ErrorGenesisBatchImported();
|
||||
|
||||
bytes32 _dataHash;
|
||||
assembly {
|
||||
let dataLen := mul(_chunksLength, 0x20)
|
||||
_dataHash := keccak256(sub(dataPtr, dataLen), dataLen)
|
||||
}
|
||||
(uint256 memPtr, bytes32 _batchHash, , ) = _loadBatchHeader(_batchHeader);
|
||||
|
||||
bytes memory paddedData = new bytes(89);
|
||||
assembly {
|
||||
mstore(add(paddedData, 57), _dataHash)
|
||||
}
|
||||
|
||||
uint256 batchPtr;
|
||||
assembly {
|
||||
batchPtr := add(paddedData, 32)
|
||||
}
|
||||
bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, 89);
|
||||
committedBatches[0] = _batchHash;
|
||||
emit CommitBatch(parentBatchIndex + 1, _batchHash);
|
||||
}
|
||||
|
||||
function finalizeBatchWithProof(
|
||||
bytes calldata batchHeader,
|
||||
bytes32 /*prevStateRoot*/,
|
||||
bytes32 postStateRoot,
|
||||
bytes32 withdrawRoot,
|
||||
bytes calldata /*aggrProof*/
|
||||
) external {
|
||||
// decode batch index
|
||||
uint256 headerLength = batchHeader.length;
|
||||
uint256 batchPtr;
|
||||
uint256 batchIndex;
|
||||
assembly {
|
||||
batchPtr := mload(0x40)
|
||||
calldatacopy(batchPtr, batchHeader.offset, headerLength)
|
||||
mstore(0x40, add(batchPtr, headerLength))
|
||||
batchIndex := shr(192, mload(add(batchPtr, 1)))
|
||||
}
|
||||
|
||||
bytes32 _batchHash = committedBatches[0];
|
||||
emit FinalizeBatch(batchIndex, _batchHash, postStateRoot, withdrawRoot);
|
||||
}
|
||||
|
||||
/**********************
|
||||
* Internal Functions *
|
||||
**********************/
|
||||
|
||||
/// @dev Internal function to generate the correct cross domain calldata for a message.
|
||||
/// @param _sender Message sender address.
|
||||
/// @param _target Target contract address.
|
||||
/// @param _value The amount of ETH pass to the target.
|
||||
/// @param _messageNonce Nonce for the provided message.
|
||||
/// @param _message Message to send to the target.
|
||||
/// @return ABI encoded cross domain calldata.
|
||||
function _encodeXDomainCalldata(
|
||||
address _sender,
|
||||
address _target,
|
||||
uint256 _value,
|
||||
uint256 _messageNonce,
|
||||
bytes memory _message
|
||||
) internal pure returns (bytes memory) {
|
||||
return
|
||||
abi.encodeWithSignature(
|
||||
"relayMessage(address,address,uint256,uint256,bytes)",
|
||||
_sender,
|
||||
_target,
|
||||
_value,
|
||||
_messageNonce,
|
||||
_message
|
||||
);
|
||||
}
|
||||
|
||||
/// @notice Utility function that converts the address in the L1 that submitted a tx to
|
||||
/// the inbox to the msg.sender viewed in the L2
|
||||
/// @param l1Address the address in the L1 that triggered the tx to L2
|
||||
/// @return l2Address L2 address as viewed in msg.sender
|
||||
function applyL1ToL2Alias(address l1Address) internal pure returns (address l2Address) {
|
||||
uint160 offset = uint160(0x1111000000000000000000000000000000001111);
|
||||
unchecked {
|
||||
l2Address = address(uint160(l1Address) + offset);
|
||||
}
|
||||
}
|
||||
|
||||
function _commitChunk(
|
||||
uint256 memPtr,
|
||||
bytes memory _chunk
|
||||
) internal pure {
|
||||
uint256 chunkPtr;
|
||||
uint256 startDataPtr;
|
||||
uint256 dataPtr;
|
||||
uint256 blockPtr;
|
||||
|
||||
assembly {
|
||||
dataPtr := mload(0x40)
|
||||
startDataPtr := dataPtr
|
||||
chunkPtr := add(_chunk, 0x20) // skip chunkLength
|
||||
blockPtr := add(chunkPtr, 1) // skip numBlocks
|
||||
}
|
||||
|
||||
uint256 _numBlocks = ChunkCodecV0.validateChunkLength(chunkPtr, _chunk.length);
|
||||
|
||||
// concatenate block contexts
|
||||
uint256 _totalTransactionsInChunk;
|
||||
for (uint256 i = 0; i < _numBlocks; i++) {
|
||||
dataPtr = ChunkCodecV0.copyBlockContext(chunkPtr, dataPtr, i);
|
||||
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(blockPtr);
|
||||
unchecked {
|
||||
_totalTransactionsInChunk += _numTransactionsInBlock;
|
||||
blockPtr += ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
|
||||
}
|
||||
}
|
||||
|
||||
assembly {
|
||||
mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
|
||||
blockPtr := add(chunkPtr, 1) // reset block ptr
|
||||
}
|
||||
|
||||
// concatenate tx hashes
|
||||
uint256 l2TxPtr = ChunkCodecV0.getL2TxPtr(chunkPtr, _numBlocks);
|
||||
while (_numBlocks > 0) {
|
||||
// concatenate l2 transaction hashes
|
||||
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(blockPtr);
|
||||
for (uint256 j = 0; j < _numTransactionsInBlock; j++) {
|
||||
bytes32 txHash;
|
||||
(txHash, l2TxPtr) = ChunkCodecV0.loadL2TxHash(l2TxPtr);
|
||||
assembly {
|
||||
mstore(dataPtr, txHash)
|
||||
dataPtr := add(dataPtr, 0x20)
|
||||
// check all fields except `dataHash` and `lastBlockHash` are zero
|
||||
unchecked {
|
||||
uint256 sum = BatchHeaderV0Codec.getVersion(memPtr) +
|
||||
BatchHeaderV0Codec.getBatchIndex(memPtr) +
|
||||
BatchHeaderV0Codec.getL1MessagePopped(memPtr) +
|
||||
BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr);
|
||||
if (sum != 0) revert ErrorGenesisBatchHasNonZeroField();
|
||||
}
|
||||
}
|
||||
if (BatchHeaderV0Codec.getDataHash(memPtr) == bytes32(0)) revert ErrorGenesisDataHashIsZero();
|
||||
if (BatchHeaderV0Codec.getParentBatchHash(memPtr) != bytes32(0)) revert ErrorGenesisParentBatchHashIsNonZero();
|
||||
|
||||
unchecked {
|
||||
_numBlocks -= 1;
|
||||
blockPtr += ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
|
||||
}
|
||||
committedBatches[0] = _batchHash;
|
||||
finalizedStateRoots[0] = _stateRoot;
|
||||
|
||||
emit CommitBatch(0, _batchHash);
|
||||
emit FinalizeBatch(0, _batchHash, _stateRoot, bytes32(0));
|
||||
}
|
||||
|
||||
// check chunk has correct length
|
||||
require(l2TxPtr - chunkPtr == _chunk.length, "incomplete l2 transaction data");
|
||||
function commitBatch(
|
||||
uint8 _version,
|
||||
bytes calldata _parentBatchHeader,
|
||||
bytes[] memory _chunks,
|
||||
bytes calldata
|
||||
) external {
|
||||
// check whether the batch is empty
|
||||
if (_chunks.length == 0) revert ErrorBatchIsEmpty();
|
||||
|
||||
// compute data hash and store to memory
|
||||
assembly {
|
||||
let dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
|
||||
mstore(memPtr, dataHash)
|
||||
(, bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _loadBatchHeader(
|
||||
_parentBatchHeader
|
||||
);
|
||||
unchecked {
|
||||
_batchIndex += 1;
|
||||
}
|
||||
if (committedBatches[_batchIndex] != 0) revert ErrorBatchIsAlreadyCommitted();
|
||||
|
||||
bytes32 _batchHash;
|
||||
uint256 batchPtr;
|
||||
bytes32 _dataHash;
|
||||
uint256 _totalL1MessagesPoppedInBatch;
|
||||
if (_version == 0) {
|
||||
(_dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV0(
|
||||
_totalL1MessagesPoppedOverall,
|
||||
_chunks
|
||||
);
|
||||
assembly {
|
||||
batchPtr := mload(0x40)
|
||||
_totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
|
||||
}
|
||||
// store entries, the order matters
|
||||
BatchHeaderV0Codec.storeVersion(batchPtr, 0);
|
||||
BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex);
|
||||
BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
|
||||
BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
|
||||
BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash);
|
||||
BatchHeaderV0Codec.storeParentBatchHash(batchPtr, _parentBatchHash);
|
||||
// compute batch hash
|
||||
_batchHash = BatchHeaderV0Codec.computeBatchHash(
|
||||
batchPtr,
|
||||
BatchHeaderV0Codec.BATCH_HEADER_FIXED_LENGTH
|
||||
);
|
||||
} else if (_version == 1) {
|
||||
bytes32 blobVersionedHash;
|
||||
(blobVersionedHash, _dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1(
|
||||
_totalL1MessagesPoppedOverall,
|
||||
_chunks
|
||||
);
|
||||
assembly {
|
||||
batchPtr := mload(0x40)
|
||||
_totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
|
||||
}
|
||||
// store entries, the order matters
|
||||
BatchHeaderV1Codec.storeVersion(batchPtr, 1);
|
||||
BatchHeaderV1Codec.storeBatchIndex(batchPtr, _batchIndex);
|
||||
BatchHeaderV1Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
|
||||
BatchHeaderV1Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
|
||||
BatchHeaderV1Codec.storeDataHash(batchPtr, _dataHash);
|
||||
BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, blobVersionedHash);
|
||||
BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash);
|
||||
// compute batch hash
|
||||
_batchHash = BatchHeaderV1Codec.computeBatchHash(
|
||||
batchPtr,
|
||||
BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH
|
||||
);
|
||||
} else {
|
||||
revert ErrorInvalidBatchHeaderVersion();
|
||||
}
|
||||
|
||||
committedBatches[_batchIndex] = _batchHash;
|
||||
emit CommitBatch(_batchIndex, _batchHash);
|
||||
}
|
||||
}
|
||||
|
||||
address private constant POINT_EVALUATION_PRECOMPILE_ADDRESS = 0x000000000000000000000000000000000000000A;
|
||||
uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
|
||||
/// @dev We keep this function to upgrade to 4844 more smoothly.
|
||||
function finalizeBatchWithProof(
|
||||
bytes calldata _batchHeader,
|
||||
bytes32 _prevStateRoot,
|
||||
bytes32 _postStateRoot,
|
||||
bytes32 _withdrawRoot,
|
||||
bytes calldata
|
||||
) external {
|
||||
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
|
||||
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
|
||||
|
||||
function verifyProof(
|
||||
bytes32 claim,
|
||||
bytes memory commitment,
|
||||
bytes memory proof
|
||||
) external view {
|
||||
require(commitment.length == 48, "Commitment must be 48 bytes");
|
||||
require(proof.length == 48, "Proof must be 48 bytes");
|
||||
// compute batch hash and verify
|
||||
(, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
|
||||
|
||||
bytes32 versionedHash = blobhash(0);
|
||||
// verify previous state root.
|
||||
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
|
||||
|
||||
// Compute random challenge point.
|
||||
uint256 point = uint256(keccak256(abi.encodePacked(versionedHash))) % BLS_MODULUS;
|
||||
// avoid duplicated verification
|
||||
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
|
||||
|
||||
bytes memory pointEvaluationCalldata = abi.encodePacked(
|
||||
versionedHash,
|
||||
point,
|
||||
claim,
|
||||
commitment,
|
||||
proof
|
||||
);
|
||||
// check and update lastFinalizedBatchIndex
|
||||
unchecked {
|
||||
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
|
||||
lastFinalizedBatchIndex = _batchIndex;
|
||||
}
|
||||
|
||||
(bool success,) = POINT_EVALUATION_PRECOMPILE_ADDRESS.staticcall(pointEvaluationCalldata);
|
||||
// record state root and withdraw root
|
||||
finalizedStateRoots[_batchIndex] = _postStateRoot;
|
||||
withdrawRoots[_batchIndex] = _withdrawRoot;
|
||||
|
||||
if (!success) {
|
||||
revert("Proof verification failed");
|
||||
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
|
||||
}
|
||||
|
||||
/// @dev Memory layout of `_blobDataProof`:
|
||||
/// ```text
|
||||
/// | z | y | kzg_commitment | kzg_proof |
|
||||
/// |---------|---------|----------------|-----------|
|
||||
/// | bytes32 | bytes32 | bytes48 | bytes48 |
|
||||
/// ```
|
||||
function finalizeBatchWithProof4844(
|
||||
bytes calldata _batchHeader,
|
||||
bytes32 _prevStateRoot,
|
||||
bytes32 _postStateRoot,
|
||||
bytes32 _withdrawRoot,
|
||||
bytes calldata _blobDataProof,
|
||||
bytes calldata
|
||||
) external {
|
||||
if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
|
||||
if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
|
||||
|
||||
// compute batch hash and verify
|
||||
(uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
|
||||
bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(memPtr);
|
||||
|
||||
// Calls the point evaluation precompile and verifies the output
|
||||
{
|
||||
(bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(
|
||||
abi.encodePacked(_blobVersionedHash, _blobDataProof)
|
||||
);
|
||||
// We verify that the point evaluation precompile call was successful by testing the latter 32 bytes of the
|
||||
// response is equal to BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
|
||||
if (!success) revert ErrorCallPointEvaluationPrecompileFailed();
|
||||
(, uint256 result) = abi.decode(data, (uint256, uint256));
|
||||
if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
|
||||
}
|
||||
|
||||
// verify previous state root.
|
||||
if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
|
||||
|
||||
// avoid duplicated verification
|
||||
if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
|
||||
|
||||
// check and update lastFinalizedBatchIndex
|
||||
unchecked {
|
||||
if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
|
||||
lastFinalizedBatchIndex = _batchIndex;
|
||||
}
|
||||
|
||||
// record state root and withdraw root
|
||||
finalizedStateRoots[_batchIndex] = _postStateRoot;
|
||||
withdrawRoots[_batchIndex] = _withdrawRoot;
|
||||
|
||||
emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
|
||||
}
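For reference, a minimal sketch (in Go, to match the other examples in this document) of how a caller might pack and unpack the `_blobDataProof` argument according to the memory layout documented above (`z | y | kzg_commitment | kzg_proof`). The helper names are illustrative, and the KZG values are assumed to have been computed elsewhere.

```go
// packBlobDataProof concatenates z (32 bytes), y (32 bytes), the KZG commitment
// (48 bytes) and the KZG proof (48 bytes) into the 160-byte blob data proof
// expected by finalizeBatchWithProof4844. Illustrative helper only.
func packBlobDataProof(z, y [32]byte, commitment, proof [48]byte) []byte {
    out := make([]byte, 0, 160)
    out = append(out, z[:]...)
    out = append(out, y[:]...)
    out = append(out, commitment[:]...)
    out = append(out, proof[:]...)
    return out
}

// unpackBlobDataProof is the inverse, useful when sanity-checking calldata.
func unpackBlobDataProof(data []byte) (z, y [32]byte, commitment, proof [48]byte, err error) {
    if len(data) != 160 {
        err = fmt.Errorf("blob data proof must be exactly 160 bytes, got %d", len(data))
        return
    }
    copy(z[:], data[0:32])
    copy(y[:], data[32:64])
    copy(commitment[:], data[64:112])
    copy(proof[:], data[112:160])
    return
}
```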
|
||||
|
||||
/**********************
|
||||
* Internal Functions *
|
||||
**********************/
|
||||
|
||||
/// @dev Internal function to commit chunks with version 0
|
||||
/// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks.
|
||||
/// @param _chunks The list of chunks to commit.
|
||||
/// @return _batchDataHash The computed data hash for the list of chunks.
|
||||
/// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped ones.
|
||||
function _commitChunksV0(
|
||||
uint256 _totalL1MessagesPoppedOverall,
|
||||
bytes[] memory _chunks
|
||||
) internal pure returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) {
|
||||
uint256 _chunksLength = _chunks.length;
|
||||
|
||||
// load `batchDataHashPtr` and reserve the memory region for chunk data hashes
|
||||
uint256 batchDataHashPtr;
|
||||
assembly {
|
||||
batchDataHashPtr := mload(0x40)
|
||||
mstore(0x40, add(batchDataHashPtr, mul(_chunksLength, 32)))
|
||||
}
|
||||
|
||||
// compute the data hash for each chunk
|
||||
for (uint256 i = 0; i < _chunksLength; i++) {
|
||||
uint256 _totalNumL1MessagesInChunk;
|
||||
bytes32 _chunkDataHash;
|
||||
(_chunkDataHash, _totalNumL1MessagesInChunk) = _commitChunkV0(
|
||||
_chunks[i],
|
||||
_totalL1MessagesPoppedInBatch,
|
||||
_totalL1MessagesPoppedOverall
|
||||
);
|
||||
unchecked {
|
||||
_totalL1MessagesPoppedInBatch += _totalNumL1MessagesInChunk;
|
||||
_totalL1MessagesPoppedOverall += _totalNumL1MessagesInChunk;
|
||||
}
|
||||
assembly {
|
||||
mstore(batchDataHashPtr, _chunkDataHash)
|
||||
batchDataHashPtr := add(batchDataHashPtr, 0x20)
|
||||
}
|
||||
}
|
||||
|
||||
assembly {
|
||||
let dataLen := mul(_chunksLength, 0x20)
|
||||
_batchDataHash := keccak256(sub(batchDataHashPtr, dataLen), dataLen)
|
||||
}
|
||||
}
|
||||
|
||||
/// @dev Internal function to commit chunks with version 1
|
||||
/// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks.
|
||||
/// @param _chunks The list of chunks to commit.
|
||||
/// @return _blobVersionedHash The blob versioned hash for the blob carried in this transaction.
|
||||
/// @return _batchDataHash The computed data hash for the list of chunks.
|
||||
/// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped ones.
|
||||
function _commitChunksV1(
|
||||
uint256 _totalL1MessagesPoppedOverall,
|
||||
bytes[] memory _chunks
|
||||
)
|
||||
internal
|
||||
view
|
||||
returns (
|
||||
bytes32 _blobVersionedHash,
|
||||
bytes32 _batchDataHash,
|
||||
uint256 _totalL1MessagesPoppedInBatch
|
||||
)
|
||||
{
|
||||
{
|
||||
bytes32 _secondBlob;
|
||||
// Get blob's versioned hash
|
||||
assembly {
|
||||
_blobVersionedHash := blobhash(0)
|
||||
_secondBlob := blobhash(1)
|
||||
}
|
||||
if (_blobVersionedHash == bytes32(0)) revert ErrorNoBlobFound();
|
||||
if (_secondBlob != bytes32(0)) revert ErrorFoundMultipleBlob();
|
||||
}
|
||||
|
||||
uint256 _chunksLength = _chunks.length;
|
||||
|
||||
// load `batchDataHashPtr` and reserve the memory region for chunk data hashes
|
||||
uint256 batchDataHashPtr;
|
||||
assembly {
|
||||
batchDataHashPtr := mload(0x40)
|
||||
mstore(0x40, add(batchDataHashPtr, mul(_chunksLength, 32)))
|
||||
}
|
||||
|
||||
// compute the data hash for each chunk
|
||||
for (uint256 i = 0; i < _chunksLength; i++) {
|
||||
uint256 _totalNumL1MessagesInChunk;
|
||||
bytes32 _chunkDataHash;
|
||||
(_chunkDataHash, _totalNumL1MessagesInChunk) = _commitChunkV1(
|
||||
_chunks[i],
|
||||
_totalL1MessagesPoppedInBatch,
|
||||
_totalL1MessagesPoppedOverall
|
||||
);
|
||||
unchecked {
|
||||
_totalL1MessagesPoppedInBatch += _totalNumL1MessagesInChunk;
|
||||
_totalL1MessagesPoppedOverall += _totalNumL1MessagesInChunk;
|
||||
}
|
||||
assembly {
|
||||
mstore(batchDataHashPtr, _chunkDataHash)
|
||||
batchDataHashPtr := add(batchDataHashPtr, 0x20)
|
||||
}
|
||||
}
|
||||
|
||||
// compute the data hash for current batch
|
||||
assembly {
|
||||
let dataLen := mul(_chunksLength, 0x20)
|
||||
_batchDataHash := keccak256(sub(batchDataHashPtr, dataLen), dataLen)
|
||||
}
|
||||
}
|
||||
|
||||
/// @dev Internal function to load batch header from calldata to memory.
|
||||
/// @param _batchHeader The batch header in calldata.
|
||||
/// @return batchPtr The start memory offset of loaded batch header.
|
||||
/// @return _batchHash The hash of the loaded batch header.
|
||||
/// @return _batchIndex The index of this batch.
|
||||
/// @return _totalL1MessagesPoppedOverall The number of L1 messages popped after this batch.
|
||||
function _loadBatchHeader(bytes calldata _batchHeader)
|
||||
internal
|
||||
view
|
||||
returns (
|
||||
uint256 batchPtr,
|
||||
bytes32 _batchHash,
|
||||
uint256 _batchIndex,
|
||||
uint256 _totalL1MessagesPoppedOverall
|
||||
)
|
||||
{
|
||||
// load version from batch header, it is always the first byte.
|
||||
uint256 version;
|
||||
assembly {
|
||||
version := shr(248, calldataload(_batchHeader.offset))
|
||||
}
|
||||
|
||||
// version should be always 0 or 1 in current code
|
||||
uint256 _length;
|
||||
if (version == 0) {
|
||||
(batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader);
|
||||
_batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length);
|
||||
_batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr);
|
||||
} else if (version == 1) {
|
||||
(batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader);
|
||||
_batchHash = BatchHeaderV1Codec.computeBatchHash(batchPtr, _length);
|
||||
_batchIndex = BatchHeaderV1Codec.getBatchIndex(batchPtr);
|
||||
} else {
|
||||
revert ErrorInvalidBatchHeaderVersion();
|
||||
}
|
||||
// only check when genesis is imported
|
||||
if (committedBatches[_batchIndex] != _batchHash && finalizedStateRoots[0] != bytes32(0)) {
|
||||
revert ErrorIncorrectBatchHash();
|
||||
}
|
||||
_totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr);
|
||||
}
|
||||
|
||||
/// @dev Internal function to commit a chunk with version 0.
|
||||
/// @param _chunk The encoded chunk to commit.
|
||||
/// @param _totalL1MessagesPoppedInBatch The total number of L1 messages popped in the current batch before this chunk.
|
||||
/// @param _totalL1MessagesPoppedOverall The total number of L1 messages popped in all batches including the current batch, before this chunk.
|
||||
/// @return _dataHash The computed data hash for this chunk.
|
||||
/// @return _totalNumL1MessagesInChunk The total number of L1 messages popped in the current chunk.
|
||||
function _commitChunkV0(
|
||||
bytes memory _chunk,
|
||||
uint256 _totalL1MessagesPoppedInBatch,
|
||||
uint256 _totalL1MessagesPoppedOverall
|
||||
) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) {
|
||||
uint256 chunkPtr;
|
||||
uint256 startDataPtr;
|
||||
uint256 dataPtr;
|
||||
|
||||
assembly {
|
||||
dataPtr := mload(0x40)
|
||||
startDataPtr := dataPtr
|
||||
chunkPtr := add(_chunk, 0x20) // skip chunkLength
|
||||
}
|
||||
|
||||
uint256 _numBlocks = ChunkCodecV0.validateChunkLength(chunkPtr, _chunk.length);
|
||||
|
||||
// concatenate block contexts, use scope to avoid stack too deep
|
||||
{
|
||||
uint256 _totalTransactionsInChunk;
|
||||
for (uint256 i = 0; i < _numBlocks; i++) {
|
||||
dataPtr = ChunkCodecV0.copyBlockContext(chunkPtr, dataPtr, i);
|
||||
uint256 blockPtr = chunkPtr + 1 + i * ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
|
||||
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(blockPtr);
|
||||
unchecked {
|
||||
_totalTransactionsInChunk += _numTransactionsInBlock;
|
||||
}
|
||||
}
|
||||
assembly {
|
||||
mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
|
||||
}
|
||||
}
|
||||
|
||||
// concatenate tx hashes
|
||||
uint256 l2TxPtr = ChunkCodecV0.getL2TxPtr(chunkPtr, _numBlocks);
|
||||
chunkPtr += 1;
|
||||
while (_numBlocks > 0) {
|
||||
// concatenate l1 message hashes
|
||||
uint256 _numL1MessagesInBlock = ChunkCodecV0.getNumL1Messages(chunkPtr);
|
||||
|
||||
// concatenate l2 transaction hashes
|
||||
uint256 _numTransactionsInBlock = ChunkCodecV0.getNumTransactions(chunkPtr);
|
||||
if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
|
||||
for (uint256 j = _numL1MessagesInBlock; j < _numTransactionsInBlock; j++) {
|
||||
bytes32 txHash;
|
||||
(txHash, l2TxPtr) = ChunkCodecV0.loadL2TxHash(l2TxPtr);
|
||||
assembly {
|
||||
mstore(dataPtr, txHash)
|
||||
dataPtr := add(dataPtr, 0x20)
|
||||
}
|
||||
}
|
||||
|
||||
unchecked {
|
||||
_totalNumL1MessagesInChunk += _numL1MessagesInBlock;
|
||||
_totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
|
||||
_totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
|
||||
|
||||
_numBlocks -= 1;
|
||||
chunkPtr += ChunkCodecV0.BLOCK_CONTEXT_LENGTH;
|
||||
}
|
||||
}
|
||||
|
||||
assembly {
|
||||
chunkPtr := add(_chunk, 0x20)
|
||||
}
|
||||
// check chunk has correct length
|
||||
if (l2TxPtr - chunkPtr != _chunk.length) revert ErrorIncompleteL2TransactionData();
|
||||
|
||||
// compute data hash and store to memory
|
||||
assembly {
|
||||
_dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
|
||||
}
|
||||
}
|
||||
|
||||
/// @dev Internal function to commit a chunk with version 1.
|
||||
/// @param _chunk The encoded chunk to commit.
|
||||
/// @param _totalL1MessagesPoppedInBatch The total number of L1 messages popped in current batch.
|
||||
/// @param _totalL1MessagesPoppedOverall The total number of L1 messages popped in all batches including current batch.
|
||||
/// @return _dataHash The computed data hash for this chunk.
|
||||
/// @return _totalNumL1MessagesInChunk The total number of L1 messages popped in the current chunk.
|
||||
function _commitChunkV1(
|
||||
bytes memory _chunk,
|
||||
uint256 _totalL1MessagesPoppedInBatch,
|
||||
uint256 _totalL1MessagesPoppedOverall
|
||||
) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) {
|
||||
uint256 chunkPtr;
|
||||
uint256 startDataPtr;
|
||||
uint256 dataPtr;
|
||||
|
||||
assembly {
|
||||
dataPtr := mload(0x40)
|
||||
startDataPtr := dataPtr
|
||||
chunkPtr := add(_chunk, 0x20) // skip chunkLength
|
||||
}
|
||||
|
||||
uint256 _numBlocks = ChunkCodecV1.validateChunkLength(chunkPtr, _chunk.length);
|
||||
// concatenate block contexts, use scope to avoid stack too deep
|
||||
for (uint256 i = 0; i < _numBlocks; i++) {
|
||||
dataPtr = ChunkCodecV1.copyBlockContext(chunkPtr, dataPtr, i);
|
||||
uint256 blockPtr = chunkPtr + 1 + i * ChunkCodecV1.BLOCK_CONTEXT_LENGTH;
|
||||
uint256 _numL1MessagesInBlock = ChunkCodecV1.getNumL1Messages(blockPtr);
|
||||
unchecked {
|
||||
_totalNumL1MessagesInChunk += _numL1MessagesInBlock;
|
||||
}
|
||||
}
|
||||
assembly {
|
||||
mstore(0x40, add(dataPtr, mul(_totalNumL1MessagesInChunk, 0x20))) // reserve memory for l1 message hashes
|
||||
chunkPtr := add(chunkPtr, 1)
|
||||
}
|
||||
|
||||
// the number of actual transactions in one chunk: non-skipped l1 messages + l2 txs
|
||||
uint256 _totalTransactionsInChunk;
|
||||
// concatenate tx hashes
|
||||
while (_numBlocks > 0) {
|
||||
// concatenate l1 message hashes
|
||||
uint256 _numL1MessagesInBlock = ChunkCodecV1.getNumL1Messages(chunkPtr);
|
||||
uint256 startPtr = dataPtr;
|
||||
uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr);
|
||||
if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
|
||||
unchecked {
|
||||
_totalTransactionsInChunk += dataPtr - startPtr; // number of non-skipped l1 messages
|
||||
_totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs
|
||||
_totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
|
||||
_totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
|
||||
|
||||
_numBlocks -= 1;
|
||||
chunkPtr += ChunkCodecV1.BLOCK_CONTEXT_LENGTH;
|
||||
}
|
||||
}
|
||||
|
||||
// compute data hash and store to memory
|
||||
assembly {
|
||||
_dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
|
||||
}
|
||||
}
|
||||
|
||||
function verifyProof(
|
||||
bytes32 claim,
|
||||
bytes memory commitment,
|
||||
bytes memory proof
|
||||
) external view {
|
||||
require(commitment.length == 48, "Commitment must be 48 bytes");
|
||||
require(proof.length == 48, "Proof must be 48 bytes");
|
||||
|
||||
bytes32 versionedHash = blobhash(0);
|
||||
|
||||
// Compute random challenge point.
|
||||
uint256 point = uint256(keccak256(abi.encodePacked(versionedHash))) % BLS_MODULUS;
|
||||
|
||||
bytes memory pointEvaluationCalldata = abi.encodePacked(
|
||||
versionedHash,
|
||||
point,
|
||||
claim,
|
||||
commitment,
|
||||
proof
|
||||
);
|
||||
|
||||
(bool success,) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(pointEvaluationCalldata);
|
||||
|
||||
if (!success) {
|
||||
revert("Proof verification failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
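As a companion to `verifyProof` above, here is a small Go sketch of the same challenge-point derivation, `uint256(keccak256(abi.encodePacked(versionedHash))) % BLS_MODULUS`. The use of the scroll-tech/go-ethereum `crypto` and `common` packages mirrors the test imports elsewhere in this changeset; the versioned hash value in `main` is a placeholder.

```go
package main

import (
    "fmt"
    "math/big"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/crypto"
)

// blsModulus is the BLS12-381 scalar field modulus defined in EIP-4844,
// matching the BLS_MODULUS constant used by the mock bridge contract.
var blsModulus, _ = new(big.Int).SetString(
    "52435875175126190479447740508185965837690552500527637822603658699938581184513", 10)

// challengePoint mirrors the contract's computation of the random evaluation
// point from the blob's versioned hash.
func challengePoint(versionedHash common.Hash) *big.Int {
    digest := crypto.Keccak256(versionedHash.Bytes())
    return new(big.Int).Mod(new(big.Int).SetBytes(digest), blsModulus)
}

func main() {
    vh := common.HexToHash("0x01") // placeholder versioned hash
    fmt.Printf("challenge point: %s\n", challengePoint(vh).String())
}
```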
|
||||
|
||||
@@ -135,7 +135,7 @@ func prepareContracts(t *testing.T) {
|
||||
nonce, err := l1Client.PendingNonceAt(context.Background(), l1Auth.From)
|
||||
assert.NoError(t, err)
|
||||
scrollChainAddress := crypto.CreateAddress(l1Auth.From, nonce)
|
||||
tx := types.NewContractCreation(nonce, big.NewInt(0), 1000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
|
||||
tx := types.NewContractCreation(nonce, big.NewInt(0), 10000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
|
||||
signedTx, err := l1Auth.Signer(l1Auth.From, tx)
|
||||
assert.NoError(t, err)
|
||||
err = l1Client.SendTransaction(context.Background(), signedTx)
|
||||
@@ -174,6 +174,8 @@ func TestFunction(t *testing.T) {
|
||||
// l1 rollup and watch rollup events
|
||||
t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch)
|
||||
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
|
||||
t.Run("TestCommitBatchAndFinalizeBatch4844", testCommitBatchAndFinalizeBatch4844)
|
||||
t.Run("TestCommitBatchAndFinalizeBatchBeforeAndPost4844", testCommitBatchAndFinalizeBatchBeforeAndPost4844)
|
||||
|
||||
// l1/l2 gas oracle
|
||||
t.Run("TestImportL1GasPrice", testImportL1GasPrice)
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
@@ -69,7 +70,7 @@ func testImportL2GasPrice(t *testing.T) {
|
||||
prepareContracts(t)
|
||||
|
||||
l2Cfg := rollupApp.Config.L2Config
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, relayer.ServiceTypeL2GasOracle, nil)
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, false, relayer.ServiceTypeL2GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
@@ -97,7 +98,7 @@ func testImportL2GasPrice(t *testing.T) {
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// check db status
|
||||
|
||||
@@ -29,7 +29,7 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) {
|
||||
prepareContracts(t)
|
||||
|
||||
l2Cfg := rollupApp.Config.L2Config
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, l2Relayer)
|
||||
defer l2Relayer.StopSenders()
|
||||
@@ -59,7 +59,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
|
||||
// Create L2Relayer
|
||||
l2Cfg := rollupApp.Config.L2Config
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, relayer.ServiceTypeL2RollupRelayer, nil)
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
@@ -69,17 +69,18 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
|
||||
// add some blocks to db
|
||||
var blocks []*encoding.Block
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := int64(0); i < 10; i++ {
|
||||
header := gethTypes.Header{
|
||||
Number: big.NewInt(int64(i)),
|
||||
Number: big.NewInt(i + 1),
|
||||
ParentHash: common.Hash{},
|
||||
Difficulty: big.NewInt(0),
|
||||
BaseFee: big.NewInt(0),
|
||||
Root: common.HexToHash("0x1"),
|
||||
}
|
||||
blocks = append(blocks, &encoding.Block{
|
||||
Header: &header,
|
||||
Transactions: nil,
|
||||
WithdrawRoot: common.Hash{},
|
||||
WithdrawRoot: common.HexToHash("0x2"),
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
})
|
||||
}
|
||||
@@ -96,6 +97,14 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
}, ¶ms.ChainConfig{}, db, nil)
|
||||
|
||||
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxChunkNumPerBatch: 10,
|
||||
MaxL1CommitGasPerBatch: 50000000000,
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
BatchTimeoutSec: 300,
|
||||
}, ¶ms.ChainConfig{}, db, nil)
|
||||
|
||||
cp.TryProposeChunk()
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
@@ -107,12 +116,6 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 1)
|
||||
|
||||
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxChunkNumPerBatch: 10,
|
||||
MaxL1CommitGasPerBatch: 50000000000,
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
BatchTimeoutSec: 300,
|
||||
}, ¶ms.ChainConfig{}, db, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
l2Relayer.ProcessPendingBatches()
|
||||
@@ -175,3 +178,256 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
|
||||
}, 30*time.Second, time.Second)
|
||||
}
|
||||
|
||||
func testCommitBatchAndFinalizeBatch4844(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
prepareContracts(t)
|
||||
|
||||
// Create L2Relayer
|
||||
l2Cfg := rollupApp.Config.L2Config
|
||||
chainConfig := ¶ms.ChainConfig{BanachBlock: big.NewInt(0)}
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
// Create L1Watcher
|
||||
l1Cfg := rollupApp.Config.L1Config
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
|
||||
|
||||
// add some blocks to db
|
||||
var blocks []*encoding.Block
|
||||
for i := int64(0); i < 10; i++ {
|
||||
header := gethTypes.Header{
|
||||
Number: big.NewInt(i + 1),
|
||||
ParentHash: common.Hash{},
|
||||
Difficulty: big.NewInt(0),
|
||||
BaseFee: big.NewInt(0),
|
||||
Root: common.HexToHash("0x1"),
|
||||
}
|
||||
blocks = append(blocks, &encoding.Block{
|
||||
Header: &header,
|
||||
Transactions: nil,
|
||||
WithdrawRoot: common.HexToHash("0x2"),
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
})
|
||||
}
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: 100,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 1,
|
||||
MaxL1CommitCalldataSizePerChunk: 1,
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxChunkNumPerBatch: 10,
|
||||
MaxL1CommitGasPerBatch: 1,
|
||||
MaxL1CommitCalldataSizePerBatch: 1,
|
||||
BatchTimeoutSec: 300,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
cp.TryProposeChunk()
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background())
|
||||
assert.NoError(t, err)
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 1)
|
||||
|
||||
bp.TryProposeBatch()
|
||||
|
||||
l2Relayer.ProcessPendingBatches()
|
||||
batch, err := batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batch)
|
||||
|
||||
// fetch rollup events
|
||||
assert.Eventually(t, func() bool {
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
var statuses []types.RollupStatus
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
|
||||
return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
|
||||
}, 30*time.Second, time.Second)
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
batch, err = batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batch)
|
||||
assert.NotEmpty(t, batch.CommitTxHash)
|
||||
var receipt *gethTypes.Receipt
|
||||
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
|
||||
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
|
||||
}, 30*time.Second, time.Second)
|
||||
|
||||
// add dummy proof
|
||||
proof := &message.BatchProof{
|
||||
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
}
|
||||
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// process committed batch and check status
|
||||
l2Relayer.ProcessCommittedBatches()
|
||||
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupFinalizing, statuses[0])
|
||||
|
||||
// fetch rollup events
|
||||
assert.Eventually(t, func() bool {
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
var statuses []types.RollupStatus
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
|
||||
return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
|
||||
}, 30*time.Second, time.Second)
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
batch, err = batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batch)
|
||||
assert.NotEmpty(t, batch.FinalizeTxHash)
|
||||
var receipt *gethTypes.Receipt
|
||||
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
|
||||
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
|
||||
}, 30*time.Second, time.Second)
|
||||
}
|
||||
|
||||
func testCommitBatchAndFinalizeBatchBeforeAndPost4844(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
prepareContracts(t)
|
||||
|
||||
// Create L2Relayer
|
||||
l2Cfg := rollupApp.Config.L2Config
|
||||
chainConfig := ¶ms.ChainConfig{BanachBlock: big.NewInt(5)}
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
// Create L1Watcher
|
||||
l1Cfg := rollupApp.Config.L1Config
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
|
||||
|
||||
// add some blocks to db
|
||||
var blocks []*encoding.Block
|
||||
for i := int64(0); i < 10; i++ {
|
||||
header := gethTypes.Header{
|
||||
Number: big.NewInt(i + 1),
|
||||
ParentHash: common.Hash{},
|
||||
Difficulty: big.NewInt(0),
|
||||
BaseFee: big.NewInt(0),
|
||||
Root: common.HexToHash("0x1"),
|
||||
}
|
||||
blocks = append(blocks, &encoding.Block{
|
||||
Header: &header,
|
||||
Transactions: nil,
|
||||
WithdrawRoot: common.HexToHash("0x2"),
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
})
|
||||
}
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks)
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxBlockNumPerChunk: 100,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxChunkNumPerBatch: 10,
|
||||
MaxL1CommitGasPerBatch: 50000000000,
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
BatchTimeoutSec: 300,
|
||||
}, chainConfig, db, nil)
|
||||
|
||||
cp.TryProposeChunk()
|
||||
cp.TryProposeChunk()
|
||||
bp.TryProposeBatch()
|
||||
bp.TryProposeBatch()
|
||||
|
||||
for i := uint64(0); i < 2; i++ {
|
||||
l2Relayer.ProcessPendingBatches()
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batch, err := batchOrm.GetBatchByIndex(context.Background(), i+1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batch)
|
||||
|
||||
// fetch rollup events
|
||||
assert.Eventually(t, func() bool {
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
var statuses []types.RollupStatus
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
|
||||
return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
|
||||
}, 30*time.Second, time.Second)
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batch)
|
||||
assert.NotEmpty(t, batch.CommitTxHash)
|
||||
var receipt *gethTypes.Receipt
|
||||
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
|
||||
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
|
||||
}, 30*time.Second, time.Second)
|
||||
|
||||
// add dummy proof
|
||||
proof := &message.BatchProof{
|
||||
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
|
||||
}
|
||||
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100)
|
||||
assert.NoError(t, err)
|
||||
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// process committed batch and check status
|
||||
l2Relayer.ProcessCommittedBatches()
|
||||
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupFinalizing, statuses[0])
|
||||
|
||||
// fetch rollup events
|
||||
assert.Eventually(t, func() bool {
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
var statuses []types.RollupStatus
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
|
||||
return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
|
||||
}, 30*time.Second, time.Second)
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batch)
|
||||
assert.NotEmpty(t, batch.FinalizeTxHash)
|
||||
var receipt *gethTypes.Receipt
|
||||
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
|
||||
return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
|
||||
}, 30*time.Second, time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,20 +65,17 @@ func (*Batch) TableName() string {
	return "batch"
}

// GetLatestBatch retrieves the latest batch from the database.
func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
// GetBatchByIndex retrieves the batch by the given index.
func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Order("index desc")
	db = db.Where("index = ?", index)

	var latestBatch Batch
	if err := db.First(&latestBatch).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
	var batch Batch
	if err := db.First(&batch).Error; err != nil {
		return nil, fmt.Errorf("Batch.GetBatchByIndex error: %w, index: %v", err, index)
	}
	return &latestBatch, nil
	return &batch, nil
}

// InsertBatch inserts a new batch into the database.
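One behavioral detail in this hunk: GetLatestBatch swallowed gorm.ErrRecordNotFound and returned (nil, nil), while GetBatchByIndex wraps the error with %w and returns it. A caller that still needs to treat a missing row as "no batch" can unwrap the sentinel, as in the hedged fragment below; the batchOrm, ctx, and index names and the enclosing function are illustrative, not code from the diff.

	// Hedged sketch: distinguish "row not found" from a real database error.
	// Because GetBatchByIndex wraps the error with %w, errors.Is still matches the sentinel.
	batch, err := batchOrm.GetBatchByIndex(ctx, index)
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil // no batch at this index yet
		}
		return nil, err
	}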
@@ -98,21 +95,17 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
		log.Error("failed to create new DA batch",
			"index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
			"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
		return nil, err
	}

	var startChunkIndex uint64
	parentBatch, err := o.GetLatestBatch(ctx)
	if err != nil {
		log.Error("failed to get latest batch", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
			"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err)
		return nil, fmt.Errorf("Batch.InsertBatch error: %w", err)
	}

	// if parentBatch==nil then err==gorm.ErrRecordNotFound, which means there's
	// no batch record in the db, we then use default empty values for the creating batch;
	// if parentBatch!=nil then err==nil, then we fill the parentBatch-related data into the creating batch
	if parentBatch != nil {
	var startChunkIndex uint64
	if batch.Index > 0 {
		parentBatch, getErr := o.GetBatchByIndex(ctx, batch.Index-1)
		if getErr != nil {
			log.Error("failed to get batch by index", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore,
				"parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", getErr)
			return nil, fmt.Errorf("Batch.InsertBatch error: %w", getErr)
		}
		startChunkIndex = parentBatch.EndChunkIndex + 1
	}

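Both InsertBatch and InsertChunk (next hunks) take a trailing variadic dbTX parameter, visible in the truncated hunk headers, which suggests they can participate in a caller-managed gorm transaction. The fragment below sketches one plausible usage; the assumption that dbTX is a *gorm.DB, the two-value return shape, and the db, chunkOrm, batchOrm, ctx, chunk, and batch names are illustrative and not confirmed by the diff.

	// Hedged sketch: insert a chunk and then its batch atomically inside one gorm transaction.
	// All free variables are assumed to be in scope in the caller.
	err := db.Transaction(func(tx *gorm.DB) error {
		if _, err := chunkOrm.InsertChunk(ctx, chunk, tx); err != nil {
			return err
		}
		if _, err := batchOrm.InsertBatch(ctx, batch, tx); err != nil {
			return err
		}
		return nil // returning nil commits the transaction
	})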
@@ -108,7 +108,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...

	daChunk, err := codecv0.NewDAChunk(chunk, totalL1MessagePoppedBefore)
	if err != nil {
		log.Error("failed to initialize new DA chunk", "err", err)
		log.Error("failed to create DA chunk", "err", err)
		return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
	}

@@ -118,18 +118,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
		return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
	}

	totalL1CommitCalldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
	if err != nil {
		log.Error("failed to estimate chunk L1 commit calldata size", "err", err)
		return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
	}

	totalL1CommitGas, err := codecv0.EstimateChunkL1CommitGas(chunk)
	if err != nil {
		log.Error("failed to estimate chunk L1 commit gas", "err", err)
		return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
	}

	numBlocks := len(chunk.Blocks)
	newChunk := Chunk{
		Index: chunkIndex,
@@ -138,10 +126,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
		StartBlockHash: chunk.Blocks[0].Header.Hash().Hex(),
		EndBlockNumber: chunk.Blocks[numBlocks-1].Header.Number.Uint64(),
		EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
		TotalL2TxGas: chunk.L2GasUsed(),
		TotalL2TxNum: chunk.NumL2Transactions(),
		TotalL1CommitCalldataSize: totalL1CommitCalldataSize,
		TotalL1CommitGas: totalL1CommitGas,
		StartBlockTime: chunk.Blocks[0].Header.Time,
		TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
		TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),
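These two hunks drop the per-chunk TotalL1CommitCalldataSize and TotalL1CommitGas values from the inserted Chunk row. If a consumer still needs those figures, they can be recomputed on demand from the same codecv0 estimators that the removed lines called; the sketch below is illustrative only, and the helper name and the log.Info call are assumptions.

// logChunkCommitEstimates is a hypothetical helper that recomputes the dropped
// commit metrics for a chunk on demand, using the codecv0 estimators that the
// removed lines above invoked.
func logChunkCommitEstimates(chunk *encoding.Chunk) error {
	calldataSize, err := codecv0.EstimateChunkL1CommitCalldataSize(chunk)
	if err != nil {
		return fmt.Errorf("estimate chunk L1 commit calldata size: %w", err)
	}
	commitGas, err := codecv0.EstimateChunkL1CommitGas(chunk)
	if err != nil {
		return fmt.Errorf("estimate chunk L1 commit gas: %w", err)
	}
	log.Info("chunk L1 commit estimates", "calldata size", calldataSize, "gas", commitGas)
	return nil
}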