Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-09 14:08:03 -05:00)

refactor(rollup relayer): remove max_block_num_per_chunk configuration parameter (#1729)
Co-authored-by: jonastheis <jonastheis@users.noreply.github.com>

@@ -5,7 +5,7 @@ import (
     "runtime/debug"
 )

-var tag = "v4.5.45"
+var tag = "v4.5.46"

 var commit = func() string {
     if info, ok := debug.ReadBuildInfo(); ok {
@@ -15,7 +15,6 @@
     },
     "chunk_proposer_config": {
       "propose_interval_milliseconds": 100,
-      "max_block_num_per_chunk": 100,
       "max_l2_gas_per_chunk": 20000000,
       "chunk_timeout_sec": 300,
       "max_uncompressed_batch_bytes_size": 4194304
@@ -92,7 +92,6 @@
     },
     "chunk_proposer_config": {
       "propose_interval_milliseconds": 100,
-      "max_block_num_per_chunk": 100,
       "max_l2_gas_per_chunk": 20000000,
       "chunk_timeout_sec": 300,
       "max_uncompressed_batch_bytes_size": 4194304
@@ -31,7 +31,6 @@ type L2Config struct {
 // ChunkProposerConfig loads chunk_proposer configuration items.
 type ChunkProposerConfig struct {
     ProposeIntervalMilliseconds   uint64 `json:"propose_interval_milliseconds"`
-    MaxBlockNumPerChunk           uint64 `json:"max_block_num_per_chunk"`
     MaxL2GasPerChunk              uint64 `json:"max_l2_gas_per_chunk"`
     ChunkTimeoutSec               uint64 `json:"chunk_timeout_sec"`
     MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
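
For illustration, here is a minimal, self-contained sketch of decoding the post-change chunk_proposer_config JSON (as shown in the config hunks above) into a struct with the same json tags as the diffed type; the local ChunkProposerConfig below is a stand-in for this example, not the rollup's own config package:

package main

import (
    "encoding/json"
    "fmt"
    "log"
)

// ChunkProposerConfig mirrors the fields kept by this change;
// max_block_num_per_chunk is intentionally absent.
type ChunkProposerConfig struct {
    ProposeIntervalMilliseconds   uint64 `json:"propose_interval_milliseconds"`
    MaxL2GasPerChunk              uint64 `json:"max_l2_gas_per_chunk"`
    ChunkTimeoutSec               uint64 `json:"chunk_timeout_sec"`
    MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
}

func main() {
    // Sample values taken from the post-change JSON in this diff.
    raw := []byte(`{
        "propose_interval_milliseconds": 100,
        "max_l2_gas_per_chunk": 20000000,
        "chunk_timeout_sec": 300,
        "max_uncompressed_batch_bytes_size": 4194304
    }`)

    var cfg ChunkProposerConfig
    if err := json.Unmarshal(raw, &cfg); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%+v\n", cfg)
}

Because encoding/json ignores unknown keys by default, an older config file that still contains max_block_num_per_chunk would continue to parse; the removed key is simply dropped.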
@@ -36,7 +36,7 @@ func testBatchProposerLimitsCodecV7(t *testing.T) {
             name:                       "Timeout",
             batchTimeoutSec:            0,
             expectedBatchesLen:         1,
-            expectedChunksInFirstBatch: 2,
+            expectedChunksInFirstBatch: 1,
         },
     }

@@ -72,8 +72,7 @@ func testBatchProposerLimitsCodecV7(t *testing.T) {
     assert.NoError(t, err)

     cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-        MaxBlockNumPerChunk:           1,
-        MaxL2GasPerChunk:              20000000,
+        MaxL2GasPerChunk:              math.MaxUint64,
         ChunkTimeoutSec:               300,
         MaxUncompressedBatchBytesSize: math.MaxUint64,
     }, encoding.CodecV7, &params.ChainConfig{
@@ -154,7 +153,6 @@ func testBatchProposerBlobSizeLimitCodecV7(t *testing.T) {
     chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}

     cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-        MaxBlockNumPerChunk:           math.MaxUint64,
         MaxL2GasPerChunk:              math.MaxUint64,
         ChunkTimeoutSec:               0,
         MaxUncompressedBatchBytesSize: math.MaxUint64,
@@ -227,7 +225,6 @@ func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
     chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}

     cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-        MaxBlockNumPerChunk:           math.MaxUint64,
         MaxL2GasPerChunk:              math.MaxUint64,
         ChunkTimeoutSec:               0,
         MaxUncompressedBatchBytesSize: math.MaxUint64,
@@ -309,15 +306,14 @@ func testBatchProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {

     // Create chunk proposer with no uncompressed batch bytes limit for chunks
     cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-        MaxBlockNumPerChunk:           1, // One block per chunk
-        MaxL2GasPerChunk:              math.MaxUint64,
+        MaxL2GasPerChunk:              1200000, // One block per chunk via gas limit
         ChunkTimeoutSec:               math.MaxUint32,
         MaxUncompressedBatchBytesSize: math.MaxUint64,
     }, encoding.CodecV8, chainConfig, db, nil)

     // Insert 2 blocks with large calldata and create 2 chunks
     l2BlockOrm := orm.NewL2Block(db)
-    for i := uint64(1); i <= 2; i++ {
+    for i := uint64(1); i <= 3; i++ {
         blockCopy := *block
         blockCopy.Header = &gethTypes.Header{}
         *blockCopy.Header = *block.Header
@@ -326,7 +322,9 @@ func testBatchProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
         err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{&blockCopy})
         assert.NoError(t, err)

-        cp.TryProposeChunk() // Each call creates one chunk with one block
+        cp.TryProposeChunk() // Each chunk will contain 1 block (~3KiB)
+        // We create 2 chunks here, as we have 3 blocks and reach the gas limit for the 1st chunk with the 2nd block
+        // and the 2nd chunk with the 3rd block.
     }

     // Create batch proposer with 4KiB uncompressed batch bytes limit
@@ -86,15 +86,19 @@ func testBundleProposerLimitsCodecV7(t *testing.T) {
     _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
     assert.NoError(t, err)

+    block3 := *block1
+    block3.Header = &gethTypes.Header{}
+    *block3.Header = *block1.Header
+    block3.Header.Number = new(big.Int).SetUint64(block2.Header.Number.Uint64() + 1)
+
     l2BlockOrm := orm.NewL2Block(db)
-    err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
+    err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2, &block3})
     assert.NoError(t, err)

     chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}

     cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-        MaxBlockNumPerChunk:           1,
-        MaxL2GasPerChunk:              math.MaxUint64,
+        MaxL2GasPerChunk:              1152994, // One block per chunk via gas limit
         ChunkTimeoutSec:               math.MaxUint32,
         MaxUncompressedBatchBytesSize: math.MaxUint64,
     }, encoding.CodecV7, chainConfig, db, nil)
@@ -54,7 +54,6 @@ type ChunkProposer struct {
 // NewChunkProposer creates a new ChunkProposer instance.
 func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
     log.Info("new chunk proposer",
-        "maxBlockNumPerChunk", cfg.MaxBlockNumPerChunk,
         "maxL2GasPerChunk", cfg.MaxL2GasPerChunk,
         "chunkTimeoutSec", cfg.ChunkTimeoutSec,
         "maxBlobSize", maxBlobSize)
@@ -232,10 +231,9 @@ func (p *ChunkProposer) ProposeChunk() error {
         return err
     }

-    maxBlocksThisChunk := p.cfg.MaxBlockNumPerChunk
-
-    // select at most maxBlocksThisChunk blocks
-    blocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, unchunkedBlockHeight, int(maxBlocksThisChunk))
+    // select blocks without a hard limit on count in practice (use a large value)
+    // The actual limits will be enforced by gas, timeout, and blob size constraints
+    blocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, unchunkedBlockHeight, 1000)
     if err != nil {
         return err
     }
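
The hunk above replaces the configurable per-chunk block cap with a fixed fetch window of 1000 candidate blocks; the chunk boundary is then decided by the remaining limits (gas, timeout, blob size, uncompressed bytes). A self-contained sketch of that greedy accumulation idea, with illustrative types, helper names, and numbers that are not taken from the repository:

package main

import "fmt"

type candidateBlock struct {
    number  uint64
    gasUsed uint64
    bytes   uint64
}

// greedyChunk keeps appending candidate blocks until adding the next one would
// exceed the gas or uncompressed-size budget. This is a simplified stand-in for
// the real proposer, which also checks blob size and a first-block timeout.
func greedyChunk(blocks []candidateBlock, maxGas, maxBytes uint64) []candidateBlock {
    var gas, size uint64
    for i, b := range blocks {
        if gas+b.gasUsed > maxGas || size+b.bytes > maxBytes {
            return blocks[:i]
        }
        gas += b.gasUsed
        size += b.bytes
    }
    return blocks
}

func main() {
    // The fetch window is a fixed cap (1000 in the diff); the limits decide the cut.
    candidates := []candidateBlock{
        {number: 1, gasUsed: 800_000, bytes: 3 * 1024},
        {number: 2, gasUsed: 700_000, bytes: 3 * 1024},
        {number: 3, gasUsed: 900_000, bytes: 3 * 1024},
    }
    chunk := greedyChunk(candidates, 1_153_000, 4*1024)
    fmt.Println(len(chunk)) // 1: the second block would exceed both budgets
}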
@@ -251,7 +249,7 @@ func (p *ChunkProposer) ProposeChunk() error {
         currentHardfork := encoding.GetHardforkName(p.chainCfg, blocks[i].Header.Number.Uint64(), blocks[i].Header.Time)
         if currentHardfork != hardforkName {
             blocks = blocks[:i]
-            maxBlocksThisChunk = uint64(i) // update maxBlocksThisChunk to trigger chunking, because these blocks are the last blocks before the hardfork
+            // Truncate blocks at hardfork boundary
             break
         }
     }
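
The surrounding loop keeps only the prefix of fetched blocks that share the first block's hardfork and truncates at the first mismatch (blocks = blocks[:i]). A small generic sketch of that truncation pattern; the helper name and sample data are illustrative, not part of the commit:

package main

import "fmt"

// truncateAtBoundary returns the longest prefix of items whose key matches the
// key of the first item, mirroring the `blocks = blocks[:i]; break` logic above.
func truncateAtBoundary[T any](items []T, key func(T) string) []T {
    if len(items) == 0 {
        return items
    }
    first := key(items[0])
    for i := 1; i < len(items); i++ {
        if key(items[i]) != first {
            return items[:i]
        }
    }
    return items
}

func main() {
    type block struct {
        number   uint64
        hardfork string
    }
    // Hypothetical fork names for illustration only.
    blocks := []block{{1, "euclidV2"}, {2, "euclidV2"}, {3, "nextFork"}}
    kept := truncateAtBoundary(blocks, func(b block) string { return b.hardfork })
    fmt.Println(len(kept)) // 2: the chunk stops at the hardfork boundary
}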
@@ -324,8 +322,8 @@ func (p *ChunkProposer) ProposeChunk() error {
     }

     currentTimeSec := uint64(time.Now().Unix())
-    if metrics.FirstBlockTimestamp+p.cfg.ChunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk {
-        log.Info("reached maximum number of blocks in chunk or first block timeout",
+    if metrics.FirstBlockTimestamp+p.cfg.ChunkTimeoutSec < currentTimeSec {
+        log.Info("first block timeout reached",
             "block count", len(chunk.Blocks),
             "start block number", chunk.Blocks[0].Header.Number,
             "start block timestamp", metrics.FirstBlockTimestamp,
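
With the block-count condition removed, the branch above seals a pending chunk purely on the age of its first block. A minimal sketch of that timeout check; the function and parameter names are illustrative:

package main

import (
    "fmt"
    "time"
)

// chunkTimedOut reports whether the first block of the pending chunk is older
// than chunkTimeoutSec, matching the condition
// `metrics.FirstBlockTimestamp + p.cfg.ChunkTimeoutSec < currentTimeSec`.
func chunkTimedOut(firstBlockTimestamp, chunkTimeoutSec, currentTimeSec uint64) bool {
    return firstBlockTimestamp+chunkTimeoutSec < currentTimeSec
}

func main() {
    now := uint64(time.Now().Unix())
    firstBlock := now - 301 // the first pending block is just over 5 minutes old
    fmt.Println(chunkTimedOut(firstBlock, 300, now)) // true: seal the chunk
}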
@@ -22,7 +22,6 @@ import (
 func testChunkProposerLimitsCodecV7(t *testing.T) {
     tests := []struct {
         name              string
-        maxBlockNum       uint64
         maxL2Gas          uint64
         chunkTimeoutSec   uint64
         expectedChunksLen int
@@ -30,14 +29,12 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
     }{
         {
             name:              "NoLimitReached",
-            maxBlockNum:       100,
             maxL2Gas:          20_000_000,
             chunkTimeoutSec:   1000000000000,
             expectedChunksLen: 0,
         },
         {
             name:              "Timeout",
-            maxBlockNum:       100,
             maxL2Gas:          20_000_000,
             chunkTimeoutSec:   0,
             expectedChunksLen: 1,
@@ -45,15 +42,13 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
         },
         {
             name:              "MaxL2GasPerChunkIs0",
-            maxBlockNum:       10,
             maxL2Gas:          0,
             chunkTimeoutSec:   1000000000000,
             expectedChunksLen: 0,
         },
         {
-            name:              "MaxBlockNumPerChunkIs1",
-            maxBlockNum:       1,
-            maxL2Gas:          20_000_000,
+            name:              "SingleBlockByGasLimit",
+            maxL2Gas:          1_100_000,
             chunkTimeoutSec:   1000000000000,
             expectedChunksLen: 1,
             expectedBlocksInFirstChunk: 1,
@@ -62,7 +57,6 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
             // In this test the second block is not included in the chunk because together
             // with the first block it exceeds the maxL2GasPerChunk limit.
             name:              "MaxL2GasPerChunkIsSecondBlock",
-            maxBlockNum:       10,
             maxL2Gas:          1_153_000,
             chunkTimeoutSec:   1000000000000,
             expectedChunksLen: 1,
@@ -85,7 +79,6 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
             assert.NoError(t, err)

             cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-                MaxBlockNumPerChunk:           tt.maxBlockNum,
                 MaxL2GasPerChunk:              tt.maxL2Gas,
                 ChunkTimeoutSec:               tt.chunkTimeoutSec,
                 MaxUncompressedBatchBytesSize: math.MaxUint64,
@@ -110,53 +103,6 @@ func testChunkProposerLimitsCodecV7(t *testing.T) {
     }
 }

-func testChunkProposerBlobSizeLimitCodecV7(t *testing.T) {
-    db := setupDB(t)
-    defer database.CloseDB(db)
-    block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
-    for i := uint64(0); i < 510; i++ {
-        l2BlockOrm := orm.NewL2Block(db)
-        block.Header.Number = new(big.Int).SetUint64(i + 1)
-        block.Header.Time = i + 1
-        err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
-        assert.NoError(t, err)
-    }
-
-    // Add genesis chunk.
-    chunkOrm := orm.NewChunk(db)
-    _, err := chunkOrm.InsertChunk(context.Background(), &encoding.Chunk{Blocks: []*encoding.Block{{Header: &gethTypes.Header{Number: big.NewInt(0)}}}}, encoding.CodecV0, utils.ChunkMetrics{})
-    assert.NoError(t, err)
-
-    chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
-
-    cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-        MaxBlockNumPerChunk:           255,
-        MaxL2GasPerChunk:              math.MaxUint64,
-        ChunkTimeoutSec:               math.MaxUint32,
-        MaxUncompressedBatchBytesSize: math.MaxUint64,
-    }, encoding.CodecV7, chainConfig, db, nil)
-
-    for i := 0; i < 2; i++ {
-        cp.TryProposeChunk()
-    }
-
-    chunkOrm = orm.NewChunk(db)
-    chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 1, 0)
-    assert.NoError(t, err)
-
-    var expectedNumChunks int = 2
-    var numBlocksMultiplier uint64 = 255
-    assert.Len(t, chunks, expectedNumChunks)
-
-    for i, chunk := range chunks {
-        expected := numBlocksMultiplier * (uint64(i) + 1)
-        if expected > 2000 {
-            expected = 2000
-        }
-        assert.Equal(t, expected, chunk.EndBlockNumber)
-    }
-}
-
 func testChunkProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
     db := setupDB(t)
     defer database.CloseDB(db)
@@ -204,7 +150,6 @@ func testChunkProposerUncompressedBatchBytesLimitCodecV8(t *testing.T) {
     // Set max_uncompressed_batch_bytes_size to 4KiB (4 * 1024)
     // One block (~3KiB) should fit, but two blocks (~6KiB) should exceed the limit
     cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-        MaxBlockNumPerChunk:           math.MaxUint64, // No block number limit
         MaxL2GasPerChunk:              math.MaxUint64, // No gas limit
         ChunkTimeoutSec:               math.MaxUint32, // No timeout limit
         MaxUncompressedBatchBytesSize: 4 * 1024,       // 4KiB limit
@@ -102,7 +102,6 @@ func TestFunction(t *testing.T) {

     // Run chunk proposer test cases.
     t.Run("TestChunkProposerLimitsCodecV7", testChunkProposerLimitsCodecV7)
-    t.Run("TestChunkProposerBlobSizeLimitCodecV7", testChunkProposerBlobSizeLimitCodecV7)
     t.Run("TestChunkProposerUncompressedBatchBytesLimitCodecV8", testChunkProposerUncompressedBatchBytesLimitCodecV8)

     // Run batch proposer test cases.
@@ -2,7 +2,6 @@
   "l2_config": {
     "endpoint": "https://rpc.scroll.io",
     "chunk_proposer_config": {
-      "max_block_num_per_chunk": 100,
       "max_l2_gas_per_chunk": 20000000,
       "chunk_timeout_sec": 72000000000,
       "max_uncompressed_batch_bytes_size": 4194304
@@ -118,7 +118,6 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
     }

     cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-        MaxBlockNumPerChunk:           100,
         MaxL2GasPerChunk:              math.MaxUint64,
         ChunkTimeoutSec:               300,
         MaxUncompressedBatchBytesSize: math.MaxUint64,