feat(chunk-proposer & batch-proposer): add uncompressed batch bytes length (#1348)

Author: colin
Date: 2024-05-27 19:00:28 +08:00
Committed by: GitHub
Parent: eb8e46844f
Commit: bc22d9be43
12 changed files with 68 additions and 29 deletions

View File

@@ -378,6 +378,8 @@ github.com/sclevine/agouti v3.0.0+incompatible h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGu
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce h1:SN43TBs7VaJt9q737eWWqGz0OCg4v+PtUn3RbJcG1o0=
github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
+github.com/scroll-tech/da-codec v0.0.0-20240527084248-2f17d6927ee0 h1:FMDvi7hb4i5nb9pT4EQXSP2Qn63UI1eI500L6+WiEHs=
+github.com/scroll-tech/da-codec v0.0.0-20240527084248-2f17d6927ee0/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=

View File

@@ -70,13 +70,15 @@
"max_l1_commit_calldata_size_per_chunk": 112345,
"chunk_timeout_sec": 300,
"max_row_consumption_per_chunk": 1048319,
"gas_cost_increase_multiplier": 1.2
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_size": 634880
},
"batch_proposer_config": {
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_size": 634880
}
},
"db_config": {

View File

@@ -10,7 +10,7 @@ require (
github.com/go-resty/resty/v2 v2.7.0
github.com/holiman/uint256 v1.2.4
github.com/prometheus/client_golang v1.16.0
-github.com/scroll-tech/da-codec v0.0.0-20240516115958-db04f5e6772c
+github.com/scroll-tech/da-codec v0.0.0-20240527084248-2f17d6927ee0
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.9.0

View File

@@ -236,8 +236,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.0.0-20240516115958-db04f5e6772c h1:Vi1BGENMGO8yjmnJe6QP9Eb1OPPuEi4+wd8d/DxhJ6Q=
-github.com/scroll-tech/da-codec v0.0.0-20240516115958-db04f5e6772c/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
+github.com/scroll-tech/da-codec v0.0.0-20240527084248-2f17d6927ee0 h1:FMDvi7hb4i5nb9pT4EQXSP2Qn63UI1eI500L6+WiEHs=
+github.com/scroll-tech/da-codec v0.0.0-20240527084248-2f17d6927ee0/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=

View File

@@ -33,6 +33,7 @@ type ChunkProposerConfig struct {
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
+MaxUncompressedBatchSize uint64 `json:"max_uncompressed_batch_size"`
}
// BatchProposerConfig loads batch_proposer configuration items.
@@ -41,4 +42,5 @@ type BatchProposerConfig struct {
MaxL1CommitCalldataSizePerBatch uint64 `json:"max_l1_commit_calldata_size_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
+MaxUncompressedBatchSize uint64 `json:"max_uncompressed_batch_size"`
}
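
Illustrative note (not part of the commit): the new max_uncompressed_batch_size key decodes through the json tags shown above. A minimal, self-contained Go sketch using a local struct that mirrors BatchProposerConfig and the batch_proposer_config snippet from the config file earlier in this diff:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the BatchProposerConfig fields shown above; illustrative only.
type batchProposerConfig struct {
	MaxL1CommitGasPerBatch          uint64  `json:"max_l1_commit_gas_per_batch"`
	MaxL1CommitCalldataSizePerBatch uint64  `json:"max_l1_commit_calldata_size_per_batch"`
	BatchTimeoutSec                 uint64  `json:"batch_timeout_sec"`
	GasCostIncreaseMultiplier       float64 `json:"gas_cost_increase_multiplier"`
	MaxUncompressedBatchSize        uint64  `json:"max_uncompressed_batch_size"`
}

func main() {
	raw := `{
		"max_l1_commit_gas_per_batch": 11234567,
		"max_l1_commit_calldata_size_per_batch": 112345,
		"batch_timeout_sec": 300,
		"gas_cost_increase_multiplier": 1.2,
		"max_uncompressed_batch_size": 634880
	}`
	var cfg batchProposerConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.MaxUncompressedBatchSize) // 634880
}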

View File

@@ -34,6 +34,7 @@ type BatchProposer struct {
maxL1CommitCalldataSizePerBatch uint64
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
+maxUncompressedBatchSize uint64
forkMap map[uint64]bool
chainCfg *params.ChainConfig
@@ -61,6 +62,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
"batchTimeoutSec", cfg.BatchTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"maxUncompressedBatchSize", cfg.MaxUncompressedBatchSize,
"forkHeights", forkHeights)
p := &BatchProposer{
@@ -73,6 +75,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
+maxUncompressedBatchSize: cfg.MaxUncompressedBatchSize,
forkMap: forkMap,
chainCfg: chainCfg,
@@ -235,13 +238,12 @@ func (p *BatchProposer) proposeBatch() error {
p.recordTimerBatchMetrics(metrics)
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
-if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
-totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
-metrics.L1CommitBlobSize > maxBlobSize {
+if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch || totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
+metrics.L1CommitBlobSize > maxBlobSize || metrics.L1CommitBatchSize > p.maxUncompressedBatchSize {
if i == 0 {
// The first chunk exceeds hard limits, which indicates a bug in the chunk-proposer, manual fix is needed.
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v",
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize)
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v, maxUncompressedBatchSize: %v",
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize, p.maxUncompressedBatchSize)
}
log.Debug("breaking limit condition in batching",
@@ -251,7 +253,9 @@ func (p *BatchProposer) proposeBatch() error {
"overEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGas", p.maxL1CommitGasPerBatch,
"l1CommitBlobSize", metrics.L1CommitBlobSize,
"maxBlobSize", maxBlobSize)
"maxBlobSize", maxBlobSize,
"L1CommitBatchSize", metrics.L1CommitBatchSize,
"maxUncompressedBatchSize", p.maxUncompressedBatchSize)
batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]
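
Illustrative note (not part of the commit): the batch-side breaking condition now checks four limits — calldata size, over-estimated commit gas, blob size, and the new uncompressed batch size. A simplified restatement as a standalone predicate; the helper name exceedsBatchLimits and the local struct are hypothetical, only the field and limit names mirror the hunk above:

package main

import "fmt"

// Subset of the batch metrics used by the check above; illustrative only.
type batchMetrics struct {
	L1CommitCalldataSize uint64
	L1CommitGas          uint64
	L1CommitBlobSize     uint64
	L1CommitBatchSize    uint64 // uncompressed batch bytes, new in this commit
}

const maxBlobSize = uint64(131072)

// exceedsBatchLimits mirrors the condition in proposeBatch: each metric is
// compared against its configured limit, including the new uncompressed size.
func exceedsBatchLimits(m batchMetrics, maxCalldata, maxGas, maxUncompressedBatchSize uint64, gasMultiplier float64) bool {
	overEstimatedGas := uint64(gasMultiplier * float64(m.L1CommitGas))
	return m.L1CommitCalldataSize > maxCalldata ||
		overEstimatedGas > maxGas ||
		m.L1CommitBlobSize > maxBlobSize ||
		m.L1CommitBatchSize > maxUncompressedBatchSize
}

func main() {
	m := batchMetrics{L1CommitCalldataSize: 120, L1CommitGas: 159350, L1CommitBlobSize: 4096, L1CommitBatchSize: 700000}
	fmt.Println(exceedsBatchLimits(m, 112345, 11234567, 634880, 1.2)) // true: 700000 > 634880
}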

View File

@@ -287,9 +287,9 @@ func testBatchProposerCodecv1Limits(t *testing.T) {
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
-assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
+assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
-assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
+assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
@@ -431,6 +431,7 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
+MaxUncompressedBatchSize: math.MaxUint64,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
@@ -441,9 +442,9 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
-assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
+assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
-assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
+assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
@@ -451,6 +452,7 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
+MaxUncompressedBatchSize: math.MaxUint64,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
@@ -604,9 +606,9 @@ func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) {
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
-assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
+assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
-assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
+assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
@@ -634,7 +636,7 @@ func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) {
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}
-assert.Equal(t, uint64(161270), batches[0].TotalL1CommitGas)
+assert.Equal(t, uint64(159350), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}
@@ -677,15 +679,16 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
+MaxUncompressedBatchSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
-assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
+assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
-assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
+assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
@@ -693,6 +696,7 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1.2,
+MaxUncompressedBatchSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil)
bp.TryProposeBatch()
@@ -713,7 +717,7 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}
-assert.Equal(t, uint64(161270), batches[0].TotalL1CommitGas)
+assert.Equal(t, uint64(159350), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}
@@ -760,6 +764,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
+MaxUncompressedBatchSize: math.MaxUint64,
}, chainConfig, db, nil)
blockHeight := int64(0)
@@ -780,6 +785,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
+MaxUncompressedBatchSize: math.MaxUint64,
}, chainConfig, db, nil)
for i := 0; i < 30; i++ {
@@ -851,6 +857,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
+MaxUncompressedBatchSize: math.MaxUint64,
}, chainConfig, db, nil)
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
@@ -866,6 +873,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
+MaxUncompressedBatchSize: math.MaxUint64,
}, chainConfig, db, nil)
bp.TryProposeBatch()

View File

@@ -19,8 +19,6 @@ import (
"scroll-tech/rollup/internal/utils"
)
-const maxBlobSize = uint64(131072)
// ChunkProposer proposes chunks based on available unchunked blocks.
type ChunkProposer struct {
ctx context.Context
@@ -36,6 +34,7 @@ type ChunkProposer struct {
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
+maxUncompressedBatchSize uint64
forkHeights []uint64
chainCfg *params.ChainConfig
@@ -67,6 +66,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"maxUncompressedBatchSize", cfg.MaxUncompressedBatchSize,
"forkHeights", forkHeights)
p := &ChunkProposer{
@@ -81,6 +81,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
+maxUncompressedBatchSize: cfg.MaxUncompressedBatchSize,
forkHeights: forkHeights,
chainCfg: chainCfg,
@@ -232,11 +233,12 @@ func (p *ChunkProposer) proposeChunk() error {
metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
overEstimatedL1CommitGas > p.maxL1CommitGasPerChunk ||
metrics.CrcMax > p.maxRowConsumptionPerChunk ||
-metrics.L1CommitBlobSize > maxBlobSize {
+metrics.L1CommitBlobSize > maxBlobSize ||
+metrics.L1CommitBatchSize > p.maxUncompressedBatchSize {
if i == 0 {
// The first block exceeds hard limits, which indicates a bug in the sequencer, manual fix is needed.
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxTxNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxRowConsumption: %v, maxBlobSize: %v",
block.Header.Number, metrics, p.maxTxNumPerChunk, p.maxL1CommitCalldataSizePerChunk, p.maxL1CommitGasPerChunk, p.maxRowConsumptionPerChunk, maxBlobSize)
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxTxNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxRowConsumption: %v, maxBlobSize: %v, maxUncompressedBatchSize: %v",
block.Header.Number, metrics, p.maxTxNumPerChunk, p.maxL1CommitCalldataSizePerChunk, p.maxL1CommitGasPerChunk, p.maxRowConsumptionPerChunk, maxBlobSize, p.maxUncompressedBatchSize)
}
log.Debug("breaking limit condition in chunking",
@@ -250,7 +252,9 @@ func (p *ChunkProposer) proposeChunk() error {
"rowConsumption", metrics.CrcMax,
"maxRowConsumption", p.maxRowConsumptionPerChunk,
"l1CommitBlobSize", metrics.L1CommitBlobSize,
"maxBlobSize", maxBlobSize)
"maxBlobSize", maxBlobSize,
"L1CommitBatchSize", metrics.L1CommitBatchSize,
"maxUncompressedBatchSize", p.maxUncompressedBatchSize)
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]

View File

@@ -544,6 +544,7 @@ func testChunkProposerCodecv2Limits(t *testing.T) {
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
ChunkTimeoutSec: tt.chunkTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
+MaxUncompressedBatchSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock}, db, nil)
cp.TryProposeChunk()
@@ -593,6 +594,7 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
+MaxUncompressedBatchSize: math.MaxUint64,
}, chainConfig, db, nil)
for i := 0; i < 10; i++ {

View File

@@ -1,3 +1,5 @@
package watcher
const contractEventsBlocksFetchLimit = int64(10)
+const maxBlobSize = uint64(131072)
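
Illustrative note (not part of the commit): 131072 bytes is the EIP-4844 blob size, i.e. 4096 field elements of 32 bytes each; the constant moves to package scope so both the chunk and batch proposers can share it. A tiny sanity check of the value:

package main

import "fmt"

func main() {
	const fieldElementsPerBlob = 4096
	const bytesPerFieldElement = 32
	fmt.Println(fieldElementsPerBlob * bytesPerFieldElement) // 131072, matches maxBlobSize
}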

View File

@@ -83,6 +83,9 @@ type ChunkMetrics struct {
// codecv1 metrics, default 0 for codecv0
L1CommitBlobSize uint64
+// codecv2 metrics, default 0 for codecv0 & codecv1
+L1CommitBatchSize uint64
// timing metrics
EstimateGasTime time.Duration
EstimateCalldataSizeTime time.Duration
@@ -143,7 +146,7 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
metrics.EstimateCalldataSizeTime = time.Since(start)
start = time.Now()
-metrics.L1CommitBlobSize, err = codecv2.EstimateChunkL1CommitBlobSize(chunk)
+metrics.L1CommitBatchSize, metrics.L1CommitBlobSize, err = codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
metrics.EstimateBlobSizeTime = time.Since(start)
if err != nil {
return nil, fmt.Errorf("failed to estimate codecv2 chunk L1 commit blob size: %w", err)
@@ -166,6 +169,9 @@ type BatchMetrics struct {
// codecv1 metrics, default 0 for codecv0
L1CommitBlobSize uint64
+// codecv2 metrics, default 0 for codecv0 & codecv1
+L1CommitBatchSize uint64
// timing metrics
EstimateGasTime time.Duration
EstimateCalldataSizeTime time.Duration
@@ -220,7 +226,7 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer
metrics.EstimateCalldataSizeTime = time.Since(start)
start = time.Now()
-metrics.L1CommitBlobSize, err = codecv2.EstimateBatchL1CommitBlobSize(batch)
+metrics.L1CommitBatchSize, metrics.L1CommitBlobSize, err = codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
metrics.EstimateBlobSizeTime = time.Since(start)
if err != nil {
return nil, fmt.Errorf("failed to estimate codecv2 batch L1 commit blob size: %w", err)

View File

@@ -2,6 +2,7 @@ package tests
import (
"context"
"math"
"math/big"
"testing"
"time"
@@ -229,12 +230,14 @@ func testCommitBatchAndFinalizeBatch4844(t *testing.T) {
MaxL1CommitCalldataSizePerChunk: 100000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
+MaxUncompressedBatchSize: math.MaxUint64,
}, chainConfig, db, nil)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: 1,
MaxL1CommitCalldataSizePerBatch: 100000,
BatchTimeoutSec: 300,
+MaxUncompressedBatchSize: math.MaxUint64,
}, chainConfig, db, nil)
cp.TryProposeChunk()
@@ -366,12 +369,14 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfter4844(t *testing.T) {
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
+MaxUncompressedBatchSize: math.MaxUint64,
}, chainConfig, db, nil)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
+MaxUncompressedBatchSize: math.MaxUint64,
}, chainConfig, db, nil)
cp.TryProposeChunk()
@@ -493,12 +498,14 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) {
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
+MaxUncompressedBatchSize: math.MaxUint64,
}, chainConfig, db, nil)
bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
+MaxUncompressedBatchSize: math.MaxUint64,
}, chainConfig, db, nil)
cp.TryProposeChunk()