Compare commits

6 Commits

Author SHA1 Message Date
colin
c3b2f13fe0 fix(rollup-relayer): fix sender test failure (#842)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-08-21 17:06:16 +08:00
colin
0d847e0427 feat(rollup-relayer): add more debug logs (#839)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-08-21 16:20:21 +08:00
HAOYUatHZ
7de221ba89 feat(sender): print address for checkBalance err (#836)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-21 10:37:25 +08:00
colin
16fd840896 refactor(rollup-relayer): remove some constraints in chunk and batch proposer (#825)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-20 16:45:05 +08:00
colin
20fb098a01 fix(prover): logging only on HTTP 5xx errors (#835)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-08-20 03:45:50 +08:00
HAOYUatHZ
b734282a44 fix(coordinator): bypass version check for v0.6.2 in v0.6.5 (#834)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-08-19 13:10:19 +08:00
13 changed files with 222 additions and 211 deletions

View File

@@ -64,11 +64,9 @@
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
},
"chunk_proposer_config": {
"max_tx_gas_per_chunk": 1123456,
"max_l2_tx_num_per_chunk": 1123,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"min_l1_commit_calldata_size_per_chunk": 11234,
"chunk_timeout_sec": 300,
"max_row_consumption_per_chunk": 1048319,
"gas_cost_increase_multiplier": 1.2
@@ -77,7 +75,6 @@
"max_chunk_num_per_batch": 112,
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"min_chunk_num_per_batch": 11,
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
}

View File

@@ -28,11 +28,9 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"`
MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
@@ -43,7 +41,6 @@ type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
}
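
The struct tags above drive deserialization directly: each snake_case key in the config fragment maps onto one field. A minimal, self-contained sketch (trimmed to a few of the surviving fields; not the repository's actual config loader) of parsing that fragment:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of ChunkProposerConfig above, keeping only a few fields.
type chunkProposerConfig struct {
	MaxL1CommitGasPerChunk          uint64  `json:"max_l1_commit_gas_per_chunk"`
	MaxL1CommitCalldataSizePerChunk uint64  `json:"max_l1_commit_calldata_size_per_chunk"`
	ChunkTimeoutSec                 uint64  `json:"chunk_timeout_sec"`
	GasCostIncreaseMultiplier       float64 `json:"gas_cost_increase_multiplier"`
}

func main() {
	raw := []byte(`{
	  "max_l1_commit_gas_per_chunk": 11234567,
	  "max_l1_commit_calldata_size_per_chunk": 112345,
	  "chunk_timeout_sec": 300,
	  "gas_cost_increase_multiplier": 1.2
	}`)
	var cfg chunkProposerConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {MaxL1CommitGasPerChunk:11234567 ...}
}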

View File

@@ -346,8 +346,15 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
txInfo := map[string]interface{}{
"tx_hash": tx.Hash().String(),
"tx_type": s.config.TxType,
"from": auth.From.String(),
}
switch s.config.TxType {
case LegacyTxType, AccessListTxType: // `LegacyTxType` is for ganache mock node
originalGasPrice := feeData.gasPrice
gasPrice := escalateMultipleNum.Mul(escalateMultipleNum, big.NewInt(feeData.gasPrice.Int64()))
gasPrice = gasPrice.Div(gasPrice, escalateMultipleDen)
if gasPrice.Cmp(feeData.gasPrice) < 0 {
@@ -357,7 +364,13 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
gasPrice = maxGasPrice
}
feeData.gasPrice = gasPrice
txInfo["original_gas_price"] = originalGasPrice
txInfo["adjusted_gas_price"] = gasPrice
default:
originalGasTipCap := big.NewInt(feeData.gasTipCap.Int64())
originalGasFeeCap := big.NewInt(feeData.gasFeeCap.Int64())
gasTipCap := big.NewInt(feeData.gasTipCap.Int64())
gasTipCap = gasTipCap.Mul(gasTipCap, escalateMultipleNum)
gasTipCap = gasTipCap.Div(gasTipCap, escalateMultipleDen)
@@ -389,8 +402,15 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap
txInfo["adjusted_gas_tip_cap"] = gasTipCap
txInfo["original_gas_fee_cap"] = originalGasFeeCap
txInfo["adjusted_gas_fee_cap"] = gasFeeCap
}
log.Debug("Transaction gas adjustment details", txInfo)
nonce := tx.Nonce()
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
@@ -429,6 +449,12 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
}
}
} else if s.config.EscalateBlocks+pending.submitAt < number {
log.Debug("resubmit transaction",
"tx hash", pending.tx.Hash().String(),
"submit block number", pending.submitAt,
"current block number", number,
"escalateBlocks", s.config.EscalateBlocks)
var tx *types.Transaction
tx, err := s.resubmitTransaction(pending.feeData, pending.signer, pending.tx)
if err != nil {
@@ -485,8 +511,8 @@ func (s *Sender) checkBalance(ctx context.Context) error {
}
if bls.Cmp(s.minBalance) < 0 {
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s",
bls.String(), s.minBalance.String())
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s, address: %s",
bls.String(), s.minBalance.String(), s.auth.From.String())
}
return nil
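
The resubmission logic above scales the previous fee by EscalateMultipleNum/EscalateMultipleDen, never letting it drop below the original value or rise above MaxGasPrice, and records the before/after values in txInfo so one debug line carries the whole adjustment. A standalone sketch of just the clamped escalation arithmetic (function name and sample values are illustrative):

package main

import (
	"fmt"
	"math/big"
)

// escalate returns prev * num / den, clamped to the range [prev, max].
func escalate(prev, max *big.Int, num, den uint64) *big.Int {
	next := new(big.Int).Mul(prev, new(big.Int).SetUint64(num))
	next.Div(next, new(big.Int).SetUint64(den))
	if next.Cmp(prev) < 0 {
		next.Set(prev) // never decrease the fee on resubmission
	}
	if next.Cmp(max) > 0 {
		next.Set(max) // respect the configured ceiling
	}
	return next
}

func main() {
	prev := big.NewInt(2_000_000_000)      // 2 gwei
	ceiling := big.NewInt(100_000_000_000) // 100 gwei
	fmt.Println(escalate(prev, ceiling, 11, 10)) // 2200000000
}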

View File

@@ -28,7 +28,6 @@ type BatchProposer struct {
maxChunkNumPerBatch uint64
maxL1CommitGasPerBatch uint64
maxL1CommitCalldataSizePerBatch uint32
minChunkNumPerBatch uint64
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
@@ -39,12 +38,18 @@ type BatchProposer struct {
totalL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
batchChunksNum prometheus.Gauge
batchFirstChunkTimeoutReached prometheus.Counter
batchChunksSuperposeNotEnoughTotal prometheus.Counter
batchFirstBlockTimeoutReached prometheus.Counter
batchChunksProposeNotEnoughTotal prometheus.Counter
}
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
log.Debug("new batch proposer",
"maxChunkNumPerBatch", cfg.MaxChunkNumPerBatch,
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
"batchTimeoutSec", cfg.BatchTimeoutSec)
return &BatchProposer{
ctx: ctx,
db: db,
@@ -54,7 +59,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch,
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
@@ -86,13 +90,13 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
Name: "bridge_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
}),
batchFirstChunkTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_chunk_timeout_reached_total",
Help: "Total times of batch's first chunk timeout reached",
batchFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_block_timeout_reached_total",
Help: "Total times of batch's first block timeout reached",
}),
batchChunksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_superpose_not_enough_total",
Help: "Total number of batch chunk superpose not enough",
batchChunksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_propose_not_enough_total",
Help: "Total number of batch chunk propose not enough",
}),
}
}
@@ -153,87 +157,86 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
return nil, nil
}
firstChunk := dbChunks[0]
totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
totalL1CommitGas := firstChunk.TotalL1CommitGas
totalChunks := uint64(1)
totalL1MessagePopped := firstChunk.TotalL1MessagesPoppedBefore + uint64(firstChunk.TotalL1MessagesPoppedInChunk)
var totalL1CommitCalldataSize uint32
var totalL1CommitGas uint64
var totalChunks uint64
var totalL1MessagePopped uint64
parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
if err != nil {
return nil, err
}
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
// Add extra gas costs
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 16 // version in calldata
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256) // _skippedL1MessageBitmap in calldata
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += types.CalldataNonZeroByteGas // version in calldata
// adjusting gas:
// add 1 time cold sload (2100 gas) for L1MessageQueue
// add 1 time cold address access (2600 gas) for L1MessageQueue
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
totalL1CommitGas += (2100 + 2600 - 100 - 100)
totalL1CommitGas += getKeccakGas(32 * totalChunks) // batch data hash
if parentBatch != nil {
totalL1CommitGas += getKeccakGas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += 16 * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
}
// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
totalL1CommitGas += types.GetKeccak256Gas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += types.CalldataNonZeroByteGas * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
}
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
for i, chunk := range dbChunks[1:] {
for i, chunk := range dbChunks {
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
totalL1CommitGas += chunk.TotalL1CommitGas
// adjust batch data hash gas cost
totalL1CommitGas -= getKeccakGas(32 * totalChunks)
totalL1CommitGas -= types.GetKeccak256Gas(32 * totalChunks)
totalChunks++
totalL1CommitGas += getKeccakGas(32 * totalChunks)
// adjust batch header hash gas cost
totalL1CommitGas -= getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += types.GetKeccak256Gas(32 * totalChunks)
// adjust batch header hash gas cost, batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas -= types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas += types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if totalChunks > p.maxChunkNumPerBatch ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return dbChunks[:i+1], nil
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if i == 0 {
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
}
log.Debug("breaking limit condition in batching",
"currentTotalChunks", totalChunks,
"maxChunkNumPerBatch", p.maxChunkNumPerBatch,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks[:i], nil
}
}
p.batchChunksNum.Set(float64(len(dbChunks)))
var hasChunkTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
@@ -241,18 +244,16 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
"first block timestamp", dbChunks[0].StartBlockTime,
"chunk outdated time threshold", currentTimeSec,
)
hasChunkTimeout = true
p.batchFirstChunkTimeoutReached.Inc()
p.batchFirstBlockTimeoutReached.Inc()
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks, nil
}
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
log.Warn("The chunk number of the batch is less than the minimum limit",
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
)
p.batchChunksSuperposeNotEnoughTotal.Inc()
return nil, nil
}
return dbChunks, nil
log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
p.batchChunksProposeNotEnoughTotal.Inc()
return nil, nil
}
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
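
Stripped of the gas bookkeeping, the reworked proposing loop is a greedy prefix scan: accumulate per-chunk totals, and on the first chunk that would break a cap, return the prefix before it — unless that chunk is the very first one, which indicates a chunk-proposer bug and becomes a hard error. A compact sketch of that control flow (names and error text are illustrative):

package main

import "fmt"

// proposePrefix mirrors the loop structure above for a single cap.
func proposePrefix(sizes []uint64, maxTotal uint64) ([]uint64, error) {
	var total uint64
	for i, s := range sizes {
		total += s
		if total > maxTotal {
			if i == 0 {
				// Even one element breaks the hard limit: upstream bug.
				return nil, fmt.Errorf("first element (%d) exceeds cap (%d)", s, maxTotal)
			}
			return sizes[:i], nil // batch everything before the overflow
		}
	}
	return nil, nil // cap not reached; wait for more chunks or a timeout
}

func main() {
	fmt.Println(proposePrefix([]uint64{40, 30, 50}, 100)) // [40 30] <nil>
}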

View File

@@ -23,11 +23,9 @@ func testBatchProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -37,7 +35,6 @@ func testBatchProposer(t *testing.T) {
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
bp.TryProposeBatch()

View File

@@ -51,11 +51,9 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxTxGasPerChunk uint64
maxL2TxNumPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
minL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
@@ -70,22 +68,27 @@ type ChunkProposer struct {
totalTxGasUsed prometheus.Gauge
maxTxConsumption prometheus.Gauge
chunkBlocksNum prometheus.Gauge
chunkBlockTimeoutReached prometheus.Counter
chunkBlocksSuperposeNotEnoughTotal prometheus.Counter
chunkFirstBlockTimeoutReached prometheus.Counter
chunkBlocksProposeNotEnoughTotal prometheus.Counter
}
// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
log.Debug("new chunk proposer",
"maxL2TxNumPerChunk", cfg.MaxL2TxNumPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec)
return &ChunkProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxTxGasPerChunk: cfg.MaxTxGasPerChunk,
maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
@@ -130,13 +133,13 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
Name: "bridge_propose_chunk_chunk_block_number",
Help: "The number of blocks in the chunk",
}),
chunkBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
chunkFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
Help: "Total times of chunk's first block timeout reached",
}),
chunkBlocksSuperposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_superpose_not_enough_total",
Help: "Total number of chunk block superpose not enough",
chunkBlocksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_propose_not_enough_total",
Help: "Total number of chunk block propose not enough",
}),
}
}
@@ -188,97 +191,91 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
return nil, nil
}
chunk := &types.Chunk{Blocks: blocks[:1]}
firstBlock := chunk.Blocks[0]
totalTxGasUsed := firstBlock.Header.GasUsed
totalL2TxNum := firstBlock.L2TxsNum()
totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
var chunk types.Chunk
var totalTxGasUsed uint64
var totalL2TxNum uint64
var totalL1CommitCalldataSize uint64
var totalL1CommitGas uint64
crc := chunkRowConsumption{}
totalL1CommitGas := chunk.EstimateL1CommitGas()
if err := crc.add(firstBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
p.chunkL2TxNum.Set(float64(totalL2TxNum))
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
firstBlock.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
firstBlock.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
firstBlock.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
// Check if the first block breaks any soft limits.
if totalTxGasUsed > p.maxTxGasPerChunk {
log.Warn(
"The first block in chunk exceeds l2 tx gas limit",
"block number", firstBlock.Header.Number,
"gas used", totalTxGasUsed,
"max gas limit", p.maxTxGasPerChunk,
)
}
max := crc.max()
p.maxTxConsumption.Set(float64(max))
if max > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
firstBlock.Header.Number,
crc,
max,
p.maxRowConsumptionPerChunk,
)
}
for _, block := range blocks[1:] {
chunk.Blocks = append(chunk.Blocks, block)
for i, block := range blocks {
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas = chunk.EstimateL1CommitGas()
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if err := crc.add(block.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
crcMax := crc.max()
if totalTxGasUsed > p.maxTxGasPerChunk ||
totalL2TxNum > p.maxL2TxNumPerChunk ||
if totalL2TxNum > p.maxL2TxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) ||
crc.max() > p.maxRowConsumptionPerChunk {
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
break
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
crcMax > p.maxRowConsumptionPerChunk {
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if i == 0 {
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
block.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
block.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
block.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
if crcMax > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
block.Header.Number,
crc,
crcMax,
p.maxRowConsumptionPerChunk,
)
}
}
log.Debug("breaking limit condition in chunking",
"totalL2TxNum", totalL2TxNum,
"maxL2TxNumPerChunk", p.maxL2TxNumPerChunk,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
"chunkRowConsumptionMax", crcMax,
"chunkRowConsumption", crc,
"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crcMax))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
}
chunk.Blocks = append(chunk.Blocks, block)
}
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
var hasBlockTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
@@ -286,17 +283,17 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"block timestamp", blocks[0].Header.Time,
"block outdated time threshold", currentTimeSec,
)
p.chunkBlockTimeoutReached.Inc()
hasBlockTimeout = true
p.chunkFirstBlockTimeoutReached.Inc()
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crc.max()))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
}
if !hasBlockTimeout && totalL1CommitCalldataSize < p.minL1CommitCalldataSizePerChunk {
log.Warn("The calldata size of the chunk is less than the minimum limit",
"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
)
p.chunkBlocksSuperposeNotEnoughTotal.Inc()
return nil, nil
}
return chunk, nil
log.Debug("pending blocks do not reach one of the constraints or contain a timeout block")
p.chunkBlocksProposeNotEnoughTotal.Inc()
return nil, nil
}
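
The chunkRowConsumption type itself is not shown in this diff; assuming it behaves like a per-subcircuit row counter whose max() is compared against maxRowConsumptionPerChunk, a hypothetical stand-in looks like:

package main

import "fmt"

// chunkRowConsumption is an assumed approximation of the real type:
// rows accumulated per subcircuit, with max() as the binding constraint.
type chunkRowConsumption map[string]uint64

func (c chunkRowConsumption) add(rows map[string]uint64) {
	for name, n := range rows {
		c[name] += n
	}
}

func (c chunkRowConsumption) max() uint64 {
	var m uint64
	for _, v := range c {
		if v > m {
			m = v
		}
	}
	return m
}

func main() {
	crc := chunkRowConsumption{}
	crc.add(map[string]uint64{"evm": 400_000, "keccak": 250_000})
	crc.add(map[string]uint64{"evm": 500_000, "keccak": 300_000})
	fmt.Println(crc.max()) // 900000: the evm subcircuit is the binding constraint
}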

View File

@@ -23,11 +23,9 @@ func testChunkProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -55,11 +53,9 @@ func testChunkProposerRowConsumption(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 0, // !
ChunkTimeoutSec: 300,
}, db, nil)

View File

@@ -58,11 +58,9 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NoError(t, err)
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -77,7 +75,6 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
bp.TryProposeBatch()

View File

@@ -10,6 +10,14 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
)
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
const CalldataNonZeroByteGas = 16
// GetKeccak256Gas calculates keccak256 hash gas.
func GetKeccak256Gas(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
@@ -94,10 +102,6 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
// EstimateL1CommitGas calculates the total L1 commit gas for this block approximately.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
var total uint64
var numL1Messages uint64
for _, txData := range w.Transactions {
@@ -120,9 +124,9 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
})
rlpTxData, _ := tx.MarshalBinary()
txPayloadLength := uint64(len(rlpTxData))
total += 16 * txPayloadLength // an over-estimate: treat each byte as non-zero
total += 16 * 4 // size of a uint32 field
total += getKeccakGas(txPayloadLength) // l2 tx hash
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
total += CalldataNonZeroByteGas * 4 // size of a uint32 field
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
}
// sload
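
Plugging numbers into the two shared helpers makes the per-transaction estimate concrete; the figures below assume a hypothetical 300-byte RLP-encoded L2 transaction:

package main

import "fmt"

const CalldataNonZeroByteGas = 16

// GetKeccak256Gas calculates keccak256 hash gas, as defined above.
func GetKeccak256Gas(size uint64) uint64 {
	return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}

func main() {
	payload := uint64(300) // hypothetical RLP tx size in bytes
	fmt.Println(CalldataNonZeroByteGas * payload) // 4800: payload calldata, every byte treated as non-zero
	fmt.Println(CalldataNonZeroByteGas * 4)       // 64: the uint32 length field
	fmt.Println(GetKeccak256Gas(payload))         // 90: hashing 10 words costs 30 + 6*10
}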

View File

@@ -146,14 +146,10 @@ func (c *Chunk) EstimateL1CommitGas() uint64 {
}
numBlocks := uint64(len(c.Blocks))
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += 16 // numBlocks field of chunk encoding in calldata
totalL1CommitGas += 16 * 60 * numBlocks // BlockContext in chunk
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
totalL1CommitGas += getKeccakGas(58*numBlocks + 32*totalTxNum) // chunk hash
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
return totalL1CommitGas
}
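
For scale, consider a hypothetical chunk of 3 blocks carrying 20 transactions in total: the chunk hash preimage is 58*3 + 32*20 = 814 bytes, so GetKeccak256Gas(814) = 30 + 6*26 = 186 gas, while the BlockContext calldata term contributes 16*60*3 = 2,880 gas, the warm sloads 100*3 = 300, and the numBlocks field a further 16.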

View File

@@ -6,7 +6,7 @@ import (
"strings"
)
var tag = "v4.1.77"
var tag = "v4.1.83"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -44,5 +44,5 @@ func CheckScrollProverVersion(proverVersion string) bool {
return false
}
// compare the `scroll_prover` version
return remote[2] == local[2]
return remote[2] == local[2] || remote[2] == "8c439b1"
}
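
The bypass keys off the third dash-separated component of the version string. Assuming a four-part layout of the form <tag>-<commit>-<scroll_prover>-<halo2> (the exact format is defined elsewhere in the package, not in this diff), the comparison behaves like:

package main

import (
	"fmt"
	"strings"
)

// sameScrollProver is a sketch of CheckScrollProverVersion's comparison,
// under the assumed four-part version layout described above.
func sameScrollProver(local, remote string) bool {
	l := strings.Split(local, "-")
	r := strings.Split(remote, "-")
	if len(l) < 3 || len(r) < 3 {
		return false
	}
	// "8c439b1" is grandfathered so v0.6.2 provers pass a v0.6.5 coordinator.
	return r[2] == l[2] || r[2] == "8c439b1"
}

func main() {
	fmt.Println(sameScrollProver("v4.1.83-aaaaaaa-1111111-2222222",
		"v0.6.2-bbbbbbb-8c439b1-3333333")) // true via the bypass
}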

View File

@@ -69,7 +69,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("get prover version from context failed")
}
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", proverVersion.(string), version.Version)
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}
isAssigned, err := bp.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))

View File

@@ -37,8 +37,11 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
SetBaseURL(cfg.BaseURL).
AddRetryCondition(func(r *resty.Response, _ error) bool {
// Check for HTTP 5xx errors, e.g., coordinator is restarting.
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
return r.StatusCode() >= http.StatusInternalServerError
if r.StatusCode() >= http.StatusInternalServerError {
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
return true
}
return false
})
log.Info("successfully initialized prover client",
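
The fix inverts the order of check and log, so the warning fires only when a retry will actually happen rather than on every response. A minimal standalone client with the same retry policy (the base URL, retry count, and request path are placeholders; go-resty v2 API):

package main

import (
	"net/http"

	"github.com/go-resty/resty/v2"
)

func newClient() *resty.Client {
	return resty.New().
		SetBaseURL("http://localhost:8390"). // placeholder coordinator URL
		SetRetryCount(3).
		AddRetryCondition(func(r *resty.Response, _ error) bool {
			// Retry only on HTTP 5xx, e.g., coordinator is restarting.
			return r.StatusCode() >= http.StatusInternalServerError
		})
}

func main() {
	_, _ = newClient().R().Get("/coordinator/v1/challenge") // path illustrative
}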