mirror of https://github.com/scroll-tech/scroll.git
support 15 -> 45 chunks after Curie
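In short: the proposer now derives the per-batch chunk cap from the fork state of the first unbatched chunk instead of the static MaxChunkNumPerBatch config. With Bernoulli active the batch uses CodecV1; if Curie is also active the payload is additionally compressed into the blob, and the cap rises from 15 to 45 chunks. A minimal, self-contained sketch of that rule (forkConfig, isForkActive, and maxChunksForBatch are illustrative names, not identifiers from this repo):

    package main

    import (
        "fmt"
        "math/big"
    )

    // forkConfig is a hypothetical stand-in for the fork fields of
    // params.ChainConfig that the proposer consults.
    type forkConfig struct {
        bernoulliBlock *big.Int
        curieBlock     *big.Int
    }

    // isForkActive reports whether a fork block is set and at or below num,
    // mirroring the IsBernoulli/IsCurie checks in the diff.
    func isForkActive(fork, num *big.Int) bool {
        return fork != nil && fork.Cmp(num) <= 0
    }

    // maxChunksForBatch mirrors the rule this commit adds to proposeBatch:
    // Bernoulli selects CodecV1; CodecV1 plus Curie enables compression and
    // raises the cap to 45 chunks, otherwise the pre-Curie cap of 15 holds.
    func maxChunksForBatch(cfg forkConfig, startBlock uint64) (maxChunks uint64, useCompression bool) {
        num := new(big.Int).SetUint64(startBlock)
        codecV1 := isForkActive(cfg.bernoulliBlock, num)
        if codecV1 && isForkActive(cfg.curieBlock, num) {
            return 45, true
        }
        return 15, false
    }

    func main() {
        cfg := forkConfig{bernoulliBlock: big.NewInt(0), curieBlock: big.NewInt(100)}
        fmt.Println(maxChunksForBatch(cfg, 50))  // 15 false: CodecV1 but pre-Curie, uncompressed
        fmt.Println(maxChunksForBatch(cfg, 150)) // 45 true: Curie active, compressed payload
    }

This is also why the blob-size test below now expects a single batch in the compressed case: its 20 pending chunks fit under the new cap of 45, where the old cap of 15 split them 15 + 5 across two batches.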
@@ -30,7 +30,6 @@ type BatchProposer struct {
     chunkOrm   *orm.Chunk
     l2BlockOrm *orm.L2Block
 
-    maxChunkNumPerBatch             uint64
     maxL1CommitGasPerBatch          uint64
     maxL1CommitCalldataSizePerBatch uint64
     batchTimeoutSec                 uint64
@@ -55,7 +54,6 @@ type BatchProposer struct {
 func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
     forkHeights, forkMap, _ := forks.CollectSortedForkHeights(chainCfg)
     log.Debug("new batch proposer",
-        "maxChunkNumPerBatch", cfg.MaxChunkNumPerBatch,
         "maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
         "maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
         "batchTimeoutSec", cfg.BatchTimeoutSec,
@@ -68,7 +66,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
         batchOrm:   orm.NewBatch(db),
         chunkOrm:   orm.NewChunk(db),
         l2BlockOrm: orm.NewL2Block(db),
-        maxChunkNumPerBatch:             cfg.MaxChunkNumPerBatch,
         maxL1CommitGasPerBatch:          cfg.MaxL1CommitGasPerBatch,
         maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
         batchTimeoutSec:                 cfg.BatchTimeoutSec,
@@ -154,13 +151,37 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
 }
 
 func (p *BatchProposer) proposeBatch() error {
-    unbatchedChunkIndex, err := p.batchOrm.GetFirstUnbatchedChunkIndex(p.ctx)
+    firstUnbatchedChunkIndex, err := p.batchOrm.GetFirstUnbatchedChunkIndex(p.ctx)
     if err != nil {
         return err
     }
 
-    // select at most p.maxChunkNumPerBatch chunks
-    dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch))
+    firstUnbatchedChunk, err := p.chunkOrm.GetChunksByIndex(p.ctx, firstUnbatchedChunkIndex)
+    if err != nil || firstUnbatchedChunk == nil {
+        return err
+    }
+
+    startBlockNum := new(big.Int).SetUint64(firstUnbatchedChunk.StartBlockNumber)
+
+    var codecVersion encoding.CodecVersion
+    if p.chainCfg.IsBernoulli(startBlockNum) {
+        codecVersion = encoding.CodecV1
+    } else {
+        codecVersion = encoding.CodecV0
+    }
+
+    var useCompression bool
+    var maxChunksThisBatch uint64
+    if codecVersion == encoding.CodecV1 && p.chainCfg.IsCurie(startBlockNum) {
+        useCompression = true
+        maxChunksThisBatch = 45
+    } else {
+        useCompression = false
+        maxChunksThisBatch = 15
+    }
+
+    // select at most maxChunksThisBatch chunks
+    dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, firstUnbatchedChunkIndex, int(maxChunksThisBatch))
     if err != nil {
         return err
     }
@@ -169,7 +190,6 @@ func (p *BatchProposer) proposeBatch() error {
         return nil
     }
 
-    maxChunksThisBatch := p.maxChunkNumPerBatch
     for i, chunk := range dbChunks {
         // if a chunk is starting at a fork boundary, only consider earlier chunks
         if i != 0 && p.forkMap[chunk.StartBlockNumber] {
@@ -181,20 +201,6 @@ func (p *BatchProposer) proposeBatch() error {
         }
     }
 
-    var codecVersion encoding.CodecVersion
-    if p.chainCfg.IsBernoulli(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) {
-        codecVersion = encoding.CodecV1
-    } else {
-        codecVersion = encoding.CodecV0
-    }
-
-    var useCompression bool
-    if codecVersion == encoding.CodecV1 && p.chainCfg.IsCurie(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) {
-        useCompression = true
-    } else {
-        useCompression = false
-    }
-
     daChunks, err := p.getDAChunks(dbChunks)
     if err != nil {
         return err
@@ -208,16 +214,7 @@ func (p *BatchProposer) proposeBatch() error {
     var batch encoding.Batch
     batch.Index = dbParentBatch.Index + 1
     batch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash)
-    parentBatchEndBlockNumber := daChunks[0].Blocks[0].Header.Number.Uint64() - 1
-    parentBatchCodecVersion := encoding.CodecV0
-    // Genesis batch uses codecv0 encoding, otherwise using bernoulli fork to choose codec version.
-    if dbParentBatch.Index > 0 && p.chainCfg.IsBernoulli(new(big.Int).SetUint64(parentBatchEndBlockNumber)) {
-        parentBatchCodecVersion = encoding.CodecV1
-    }
-    batch.TotalL1MessagePoppedBefore, err = utils.GetTotalL1MessagePoppedBeforeBatch(dbParentBatch.BatchHeader, parentBatchCodecVersion)
-    if err != nil {
-        return err
-    }
+    batch.TotalL1MessagePoppedBefore = firstUnbatchedChunk.TotalL1MessagesPoppedBefore
 
     for i, chunk := range daChunks {
         batch.Chunks = append(batch.Chunks, chunk)
@@ -232,7 +229,7 @@ func (p *BatchProposer) proposeBatch() error {
         if i == 0 {
             // The first chunk exceeds hard limits, which indicates a bug in the chunk-proposer, manual fix is needed.
             return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v",
-                dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, p.maxChunkNumPerBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize)
+                dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize)
         }
 
         log.Debug("breaking limit condition in batching",
@@ -63,15 +63,6 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
             batchTimeoutSec:    1000000000000,
             expectedBatchesLen: 0,
         },
-        {
-            name:                       "MaxChunkNumPerBatchIs1",
-            maxChunkNum:                1,
-            maxL1CommitGas:             50000000000,
-            maxL1CommitCalldataSize:    1000000,
-            batchTimeoutSec:            1000000000000,
-            expectedBatchesLen:         1,
-            expectedChunksInFirstBatch: 1,
-        },
         {
             name:        "MaxL1CommitGasPerBatchIsFirstChunk",
             maxChunkNum: 10,
@@ -156,7 +147,6 @@ func testBatchProposerCodecv0Limits(t *testing.T) {
         assert.Equal(t, uint64(5737), chunks[1].TotalL1CommitCalldataSize)
 
         bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
-            MaxChunkNumPerBatch:             tt.maxChunkNum,
             MaxL1CommitGasPerBatch:          tt.maxL1CommitGas,
             MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
             BatchTimeoutSec:                 tt.batchTimeoutSec,
@@ -211,13 +201,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
             expectedBatchesLen:         1,
             expectedChunksInFirstBatch: 2,
         },
-        {
-            name:                       "MaxChunkNumPerBatchIs1",
-            maxChunkNum:                1,
-            batchTimeoutSec:            1000000000000,
-            expectedBatchesLen:         1,
-            expectedChunksInFirstBatch: 1,
-        },
         {
             name:        "ForkBlockReached",
             maxChunkNum: 10,
@@ -282,7 +265,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
         assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
 
         bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
-            MaxChunkNumPerBatch:             tt.maxChunkNum,
             MaxL1CommitGasPerBatch:          1,
             MaxL1CommitCalldataSizePerBatch: 100000,
             BatchTimeoutSec:                 tt.batchTimeoutSec,
@@ -365,7 +347,6 @@ func testBatchCommitGasAndCalldataSizeEstimation(t *testing.T) {
         assert.Equal(t, uint64(5737), chunks[1].TotalL1CommitCalldataSize)
 
         bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
-            MaxChunkNumPerBatch:             10,
             MaxL1CommitGasPerBatch:          50000000000,
             MaxL1CommitCalldataSizePerBatch: 1000000,
             BatchTimeoutSec:                 0,
@@ -453,7 +434,6 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
         }
 
         bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
-            MaxChunkNumPerBatch:             15,
             MaxL1CommitGasPerBatch:          1,
             MaxL1CommitCalldataSizePerBatch: 100000,
             BatchTimeoutSec:                 math.MaxUint64,
@@ -471,8 +451,8 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
         var expectedNumBatches int
         var numChunksMultiplier uint64
         if compressed {
-            expectedNumBatches = 2
-            numChunksMultiplier = 15
+            expectedNumBatches = 1
+            numChunksMultiplier = 20
         } else {
             expectedNumBatches = 20
             numChunksMultiplier = 1
@@ -480,12 +460,86 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
         assert.Len(t, batches, expectedNumBatches)
 
         for i, batch := range batches {
-            expected := numChunksMultiplier * (uint64(i) + 1)
-            if expected > 20 {
-                expected = 20
-            }
-            assert.Equal(t, expected, batch.EndChunkIndex)
+            assert.Equal(t, numChunksMultiplier*(uint64(i)+1), batch.EndChunkIndex)
         }
         database.CloseDB(db)
     }
 }
+
+func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
+    compressionTests := []bool{false, true} // false for uncompressed, true for compressed
+    for _, compressed := range compressionTests {
+        db := setupDB(t)
+
+        // Add genesis batch.
+        block := &encoding.Block{
+            Header: &gethTypes.Header{
+                Number: big.NewInt(0),
+            },
+            RowConsumption: &gethTypes.RowConsumption{},
+        }
+        chunk := &encoding.Chunk{
+            Blocks: []*encoding.Block{block},
+        }
+        chunkOrm := orm.NewChunk(db)
+        _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false)
+        assert.NoError(t, err)
+        batch := &encoding.Batch{
+            Index:                      0,
+            TotalL1MessagePoppedBefore: 0,
+            ParentBatchHash:            common.Hash{},
+            Chunks:                     []*encoding.Chunk{chunk},
+        }
+        batchOrm := orm.NewBatch(db)
+        _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false)
+        assert.NoError(t, err)
+
+        var chainConfig *params.ChainConfig
+        if compressed {
+            chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
+        } else {
+            chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
+        }
+
+        cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
+            MaxBlockNumPerChunk:             math.MaxUint64,
+            MaxTxNumPerChunk:                math.MaxUint64,
+            MaxL1CommitGasPerChunk:          1,
+            MaxL1CommitCalldataSizePerChunk: 100000,
+            MaxRowConsumptionPerChunk:       math.MaxUint64,
+            ChunkTimeoutSec:                 0,
+            GasCostIncreaseMultiplier:       1,
+        }, chainConfig, db, nil)
+
+        block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
+        for blockHeight := int64(1); blockHeight <= 60; blockHeight++ {
+            block.Header.Number = big.NewInt(blockHeight)
+            err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block})
+            assert.NoError(t, err)
+            cp.TryProposeChunk()
+        }
+
+        bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
+            MaxL1CommitGasPerBatch:          1,
+            MaxL1CommitCalldataSizePerBatch: 100000,
+            BatchTimeoutSec:                 math.MaxUint64,
+            GasCostIncreaseMultiplier:       1,
+        }, chainConfig, db, nil)
+        bp.TryProposeBatch()
+
+        batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
+        assert.NoError(t, err)
+        assert.Len(t, batches, 2)
+        dbBatch := batches[1]
+
+        var expectedChunkNum uint64
+        if compressed {
+            expectedChunkNum = 45
+        } else {
+            expectedChunkNum = 15
+        }
+        assert.Equal(t, expectedChunkNum, dbBatch.EndChunkIndex)
+
+        database.CloseDB(db)
+    }
+}
@@ -114,6 +114,7 @@ func TestFunction(t *testing.T) {
     t.Run("TestBatchProposerCodecv1Limits", testBatchProposerCodecv1Limits)
     t.Run("TestBatchCommitGasAndCalldataSizeEstimation", testBatchCommitGasAndCalldataSizeEstimation)
     t.Run("TestBatchProposerBlobSizeLimit", testBatchProposerBlobSizeLimit)
+    t.Run("TestBatchProposerMaxChunkNumPerBatchLimit", testBatchProposerMaxChunkNumPerBatchLimit)
 }
 
 func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {