feat(relayer): estimate l1 batch commit gas (#659)

Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Author: colin
Date: 2023-07-31 20:03:38 +08:00
Committed by: GitHub
Parent: df54d518cf
Commit: eb9070e1ae
9 changed files with 122 additions and 57 deletions

View File

@@ -78,14 +78,16 @@
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"min_l1_commit_calldata_size_per_chunk": 11234,
"chunk_timeout_sec": 300
"chunk_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
},
"batch_proposer_config": {
"max_chunk_num_per_batch": 112,
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"min_chunk_num_per_batch": 11,
"batch_timeout_sec": 300
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
}
},
"db_config": {

View File

@@ -28,19 +28,21 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
- MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"`
- MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
- MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
- MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
- MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
- ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
+ MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"`
+ MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
+ MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
+ MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
+ MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
+ ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
+ GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
}
// BatchProposerConfig loads batch_proposer configuration items.
type BatchProposerConfig struct {
- MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
- MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
- MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
- MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
- BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
+ MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
+ MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
+ MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
+ MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
+ BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
+ GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
}
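Editor's note: the new multiplier is applied on the estimate side of the limit checks in both proposers (see the hunks below). A minimal sketch of that comparison, using the sample values from the config file above; the helper name and numbers are illustrative, not part of the commit:

```go
package main

import "fmt"

// exceedsCap mirrors the check used in the proposers below:
// an estimate is rejected once multiplier * estimate exceeds the configured cap.
func exceedsCap(estimatedGas uint64, multiplier float64, maxGas uint64) bool {
	return multiplier*float64(estimatedGas) > float64(maxGas)
}

func main() {
	// With gas_cost_increase_multiplier = 1.2 and max_l1_commit_gas_per_batch = 11234567,
	// estimates above roughly 11234567 / 1.2 ≈ 9362139 gas are rejected.
	fmt.Println(exceedsCap(9_300_000, 1.2, 11_234_567)) // false
	fmt.Println(exceedsCap(9_400_000, 1.2, 11_234_567)) // true
}
```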

View File

@@ -28,6 +28,7 @@ type BatchProposer struct {
maxL1CommitCalldataSizePerBatch uint32
minChunkNumPerBatch uint64
batchTimeoutSec uint64
+ gasCostIncreaseMultiplier float64
}
// NewBatchProposer creates a new BatchProposer instance.
@@ -43,6 +44,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
+ gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
}
}
@@ -99,11 +101,40 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
firstChunk := dbChunks[0]
totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
totalL1CommitGas := firstChunk.TotalL1CommitGas
- var totalChunks uint64 = 1
+ totalChunks := uint64(1)
+ totalL1MessagePopped := firstChunk.TotalL1MessagesPoppedBefore + uint64(firstChunk.TotalL1MessagesPoppedInChunk)
+ parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
+ if err != nil {
+ return nil, err
+ }
+ getKeccakGas := func(size uint64) uint64 {
+ return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
+ }
+ // Add extra gas costs
+ totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
+ totalL1CommitGas += 20000 // 1 time sstore
+ totalL1CommitGas += 16 // version in calldata
+ totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256) // _skippedL1MessageBitmap in calldata
+ // adjusting gas:
+ // add 1 time cold sload (2100 gas) for L1MessageQueue
+ // add 1 time cold address access (2600 gas) for L1MessageQueue
+ // minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
+ totalL1CommitGas += (2100 + 2600 - 100 - 100)
+ totalL1CommitGas += getKeccakGas(32 * totalChunks) // batch data hash
+ if parentBatch != nil {
+ totalL1CommitGas += getKeccakGas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
+ totalL1CommitGas += 16 * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
+ }
+ // batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
+ totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
- if totalL1CommitGas > p.maxL1CommitGasPerBatch {
+ if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
firstChunk.StartBlockNumber,
@@ -124,12 +155,21 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
}
for i, chunk := range dbChunks[1:] {
- totalChunks++
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
totalL1CommitGas += chunk.TotalL1CommitGas
+ // adjust batch data hash gas cost
+ totalL1CommitGas -= getKeccakGas(32 * totalChunks)
+ totalChunks++
+ totalL1CommitGas += getKeccakGas(32 * totalChunks)
+ // adjust batch header hash gas cost
+ totalL1CommitGas -= getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
+ totalL1CommitGas -= 16 * (32 * (totalL1MessagePopped + 255) / 256)
+ totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
+ totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256)
+ totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
if totalChunks > p.maxChunkNumPerBatch ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
- totalL1CommitGas > p.maxL1CommitGasPerBatch {
+ p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return dbChunks[:i+1], nil
}
}
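Editor's note: to make the per-batch accounting above easier to follow, here is the same fixed commitBatch overhead collected into one standalone helper. This is a sketch that mirrors the constants and expressions in the hunk; the function names, signature, and example numbers are illustrative and not part of the commit:

```go
package main

import "fmt"

// keccakGas mirrors the getKeccakGas closure above: 30 + 6 * ceil(size / 32).
func keccakGas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

// batchOverheadGas collects the extra gas added on top of the per-chunk estimates.
func batchOverheadGas(totalChunks, totalL1MessagePopped, parentHeaderLen uint64) uint64 {
	bitmapBytes := 32 * (totalL1MessagePopped + 255) / 256 // _skippedL1MessageBitmap size, as written in the hunk above
	gas := uint64(4 * 2100)                                // 4 one-time cold SLOADs in commitBatch
	gas += 20000                                           // 1 SSTORE for the batch hash
	gas += 16                                              // version byte in calldata
	gas += 16 * bitmapBytes                                // _skippedL1MessageBitmap in calldata
	gas += 2100 + 2600 - 100 - 100                         // cold SLOAD + cold address access for L1MessageQueue, minus the warm costs already counted
	gas += keccakGas(32 * totalChunks)                     // batch data hash
	if parentHeaderLen > 0 {
		gas += keccakGas(parentHeaderLen) // parent batch header hash
		gas += 16 * parentHeaderLen       // parent batch header in calldata
	}
	gas += keccakGas(89 + bitmapBytes) // new batch header hash: 89-byte fixed part + bitmap
	return gas
}

func main() {
	// Illustrative inputs: 10 chunks, 300 L1 messages popped, a 121-byte parent header.
	fmt.Println(batchOverheadGas(10, 300, 121))
}
```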

View File

@@ -28,6 +28,7 @@ type ChunkProposer struct {
maxL1CommitCalldataSizePerChunk uint64
minL1CommitCalldataSizePerChunk uint64
chunkTimeoutSec uint64
+ gasCostIncreaseMultiplier float64
}
// NewChunkProposer creates a new ChunkProposer instance.
@@ -43,6 +44,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
+ gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
}
}
@@ -88,11 +90,12 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
return nil, nil
}
- firstBlock := blocks[0]
+ chunk := &types.Chunk{Blocks: blocks[:1]}
+ firstBlock := chunk.Blocks[0]
totalTxGasUsed := firstBlock.Header.GasUsed
totalL2TxNum := firstBlock.L2TxsNum()
totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
- totalL1CommitGas := firstBlock.EstimateL1CommitGas()
+ totalL1CommitGas := chunk.EstimateL1CommitGas()
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
@@ -105,7 +108,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
)
}
- if totalL1CommitGas > p.maxL1CommitGasPerChunk {
+ if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
firstBlock.Header.Number,
@@ -133,16 +136,17 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
)
}
- for i, block := range blocks[1:] {
+ for _, block := range blocks[1:] {
+ chunk.Blocks = append(chunk.Blocks, block)
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
- totalL1CommitGas += block.EstimateL1CommitGas()
+ totalL1CommitGas = chunk.EstimateL1CommitGas()
if totalTxGasUsed > p.maxTxGasPerChunk ||
totalL2TxNum > p.maxL2TxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
- totalL1CommitGas > p.maxL1CommitGasPerChunk {
- blocks = blocks[:i+1]
+ p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
+ chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
break
}
}
@@ -165,5 +169,5 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
)
return nil, nil
}
- return &types.Chunk{Blocks: blocks}, nil
+ return chunk, nil
}
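Editor's note: the switch from summing block.EstimateL1CommitGas() to re-estimating the whole chunk after each appended block matters because the chunk-hash keccak cost (see the Chunk.EstimateL1CommitGas hunk further down) is not additive across blocks. A small self-contained illustration; the block and transaction counts are made up:

```go
package main

import "fmt"

// keccakGas mirrors the getKeccakGas helper used by the estimators: 30 + 6 * ceil(size / 32).
func keccakGas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

func main() {
	// Two blocks with 10 and 5 txs: the chunk hash covers 58 bytes per block
	// context plus 32 bytes per tx hash in a single keccak.
	separately := keccakGas(58+32*10) + keccakGas(58+32*5) // hashing each block on its own
	together := keccakGas(2*58 + 32*15)                    // hashing them as one chunk
	fmt.Println(separately, together)                      // 174 144: base cost and padding differ
}
```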

View File

@@ -134,6 +134,9 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
var latestBatch Batch
if err := db.First(&latestBatch).Error; err != nil {
+ if errors.Is(err, gorm.ErrRecordNotFound) {
+ return nil, nil
+ }
return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", err)
}
return &latestBatch, nil
@@ -212,7 +215,7 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
}
parentBatch, err := o.GetLatestBatch(ctx)
- if err != nil && !errors.Is(errors.Unwrap(err), gorm.ErrRecordNotFound) {
+ if err != nil {
log.Error("failed to get the latest batch", "err", err)
return nil, err
}
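Editor's note: GetLatestBatch now reports "no batch yet" as (nil, nil) instead of a wrapped gorm.ErrRecordNotFound, which is why InsertBatch here and the batch proposer above can drop the errors.Unwrap check and simply test parentBatch != nil. A minimal sketch of the new contract, assuming gorm.io/gorm is available; the helper below only mimics the behavior and is not part of the commit:

```go
package main

import (
	"errors"
	"fmt"

	"gorm.io/gorm"
)

type batch struct{}

// latestBatch mimics the updated GetLatestBatch contract: "not found" is not an error.
func latestBatch(queryErr error) (*batch, error) {
	if queryErr != nil {
		if errors.Is(queryErr, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, fmt.Errorf("Batch.GetLatestBatch error: %w", queryErr)
	}
	return &batch{}, nil
}

func main() {
	parent, err := latestBatch(gorm.ErrRecordNotFound)
	fmt.Println(parent == nil, err == nil) // true true: callers check parent != nil, not the error
}
```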

View File

@@ -149,12 +149,10 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
var totalL2TxGas uint64
var totalL2TxNum uint64
var totalL1CommitCalldataSize uint64
- var totalL1CommitGas uint64
for _, block := range chunk.Blocks {
totalL2TxGas += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
- totalL1CommitGas += block.EstimateL1CommitGas()
}
numBlocks := len(chunk.Blocks)
@@ -168,7 +166,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
TotalL2TxGas: totalL2TxGas,
TotalL2TxNum: uint32(totalL2TxNum),
TotalL1CommitCalldataSize: uint32(totalL1CommitCalldataSize),
- TotalL1CommitGas: totalL1CommitGas,
+ TotalL1CommitGas: chunk.EstimateL1CommitGas(),
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: uint32(chunk.NumL1Messages(totalL1MessagePoppedBefore)),

View File

@@ -10,10 +10,7 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
)
- const nonZeroByteGas uint64 = 16
- const zeroByteGas uint64 = 4
- // WrappedBlock contains the block's Header, Transactions and WithdrawRoot hash.
+ // WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
@@ -78,16 +75,20 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
return size
}
- // EstimateL1CommitGas calculates the calldata gas in l1 commit approximately.
- // TODO: This will need to be adjusted.
- // The part added here is only the calldata cost,
- // but we have execution cost for verifying blocks / chunks / batches and storing the batch hash.
+ // EstimateL1CommitGas calculates the total L1 commit gas for this block approximately.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
+ getKeccakGas := func(size uint64) uint64 {
+ return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
+ }
var total uint64
+ var numL1Messages uint64
for _, txData := range w.Transactions {
+ if txData.Type == types.L1MessageTxType {
+ numL1Messages++
+ continue
+ }
data, _ := hexutil.Decode(txData.Data)
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
@@ -101,26 +102,19 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
- for _, b := range rlpTxData {
- if b == 0 {
- total += zeroByteGas
- } else {
- total += nonZeroByteGas
- }
- }
- var txLen [4]byte
- binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
- for _, b := range txLen {
- if b == 0 {
- total += zeroByteGas
- } else {
- total += nonZeroByteGas
- }
- }
+ txPayloadLength := uint64(len(rlpTxData))
+ total += 16 * txPayloadLength // an over-estimate: treat each byte as non-zero
+ total += 16 * 4 // size of a uint32 field
+ total += getKeccakGas(txPayloadLength) // l2 tx hash
}
+ // sload
+ total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue
+ // staticcall
+ total += 100 * numL1Messages // numL1Messages times call to L1MessageQueue
+ total += 100 * numL1Messages // numL1Messages times warm address access to L1MessageQueue
return total
}
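Editor's note: a worked example of the new per-block estimate under the simplification introduced here (every calldata byte priced as non-zero at 16 gas). The transaction length is illustrative:

```go
package main

import "fmt"

// keccakGas mirrors the getKeccakGas closure above: 30 + 6 * ceil(size / 32).
func keccakGas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

func main() {
	// One L2 tx whose RLP encoding is 300 bytes (illustrative length).
	var rlpLen uint64 = 300
	gas := 16 * rlpLen       // tx payload in calldata, every byte treated as non-zero
	gas += 16 * 4            // 4-byte length prefix, also treated as non-zero
	gas += keccakGas(rlpLen) // hashing the payload for the L2 tx hash
	fmt.Println(gas)         // 4800 + 64 + 90 = 4954

	// Each L1 message in the block is priced as a cold SLOAD plus a warm call and
	// a warm address access to L1MessageQueue instead of calldata costs.
	fmt.Println(2100 + 100 + 100) // 2300 gas per L1 message
}
```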

View File

@@ -135,3 +135,25 @@ func (c *Chunk) Hash(totalL1MessagePoppedBefore uint64) (common.Hash, error) {
hash := crypto.Keccak256Hash(dataBytes)
return hash, nil
}
+ // EstimateL1CommitGas calculates the total L1 commit gas for this chunk approximately
+ func (c *Chunk) EstimateL1CommitGas() uint64 {
+ var totalTxNum uint64
+ var totalL1CommitGas uint64
+ for _, block := range c.Blocks {
+ totalTxNum += uint64(len(block.Transactions))
+ totalL1CommitGas += block.EstimateL1CommitGas()
+ }
+ numBlocks := uint64(len(c.Blocks))
+ totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
+ totalL1CommitGas += 16 // numBlocks field of chunk encoding in calldata
+ totalL1CommitGas += 16 * 60 * numBlocks // BlockContext in chunk
+ getKeccakGas := func(size uint64) uint64 {
+ return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
+ }
+ totalL1CommitGas += getKeccakGas(58*numBlocks + 32*totalTxNum) // chunk hash
+ return totalL1CommitGas
+ }
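Editor's note: the chunk-level overhead added on top of the per-block figures, spelled out for a small example (2 blocks, 15 txs; illustrative numbers only):

```go
package main

import "fmt"

// keccakGas mirrors the getKeccakGas closure above: 30 + 6 * ceil(size / 32).
func keccakGas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

func main() {
	var numBlocks, totalTxNum uint64 = 2, 15
	gas := 100 * numBlocks                         // one warm SLOAD per block
	gas += 16                                      // numBlocks field of the chunk encoding
	gas += 16 * 60 * numBlocks                     // 60-byte BlockContext per block in calldata
	gas += keccakGas(58*numBlocks + 32*totalTxNum) // chunk hash over block contexts + tx hashes
	fmt.Println(gas)                               // 200 + 16 + 1920 + 144 = 2280
}
```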

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.0.33"
var tag = "v4.0.34"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {