Compare commits

..

5 Commits

Author      SHA1         Message                                       Date
georgehao   2764018553   feat: update                                  2023-08-18 16:46:24 +08:00
georgehao   4180105c45   feat: lint                                    2023-08-18 16:28:14 +08:00
georgehao   0292a772fd   chore: auto version bump [bot]                2023-08-18 08:24:13 +00:00
georgehao   d2d75d8ee9   Merge branch 'develop' into fix/prover_bug    2023-08-18 16:23:55 +08:00
georgehao   fca6fc1579   feat: fix bug                                 2023-08-18 16:21:39 +08:00
46 changed files with 286 additions and 1292 deletions

View File

@@ -64,9 +64,11 @@
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
},
"chunk_proposer_config": {
"max_tx_gas_per_chunk": 1123456,
"max_l2_tx_num_per_chunk": 1123,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"min_l1_commit_calldata_size_per_chunk": 11234,
"chunk_timeout_sec": 300,
"max_row_consumption_per_chunk": 1048319,
"gas_cost_increase_multiplier": 1.2
@@ -75,6 +77,7 @@
"max_chunk_num_per_batch": 112,
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"min_chunk_num_per_batch": 11,
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
}

View File

@@ -28,9 +28,11 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"`
MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
@@ -41,6 +43,7 @@ type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
}
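For reference, a minimal sketch of how the new `min_*` keys would unmarshal into trimmed-down copies of the structs above via `encoding/json`; the field subset and the values are illustrative only, not the full configuration.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down copies of the structs above, kept only to show the new min_* keys.
type ChunkProposerConfig struct {
	MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
}

type BatchProposerConfig struct {
	MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
}

func main() {
	raw := []byte(`{
		"chunk_proposer_config": {"min_l1_commit_calldata_size_per_chunk": 11234},
		"batch_proposer_config": {"min_chunk_num_per_batch": 11}
	}`)

	var cfg struct {
		Chunk ChunkProposerConfig `json:"chunk_proposer_config"`
		Batch BatchProposerConfig `json:"batch_proposer_config"`
	}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Chunk.MinL1CommitCalldataSizePerChunk, cfg.Batch.MinChunkNumPerBatch)
	// Output: 11234 11
}
```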

View File

@@ -28,6 +28,7 @@ type BatchProposer struct {
maxChunkNumPerBatch uint64
maxL1CommitGasPerBatch uint64
maxL1CommitCalldataSizePerBatch uint32
minChunkNumPerBatch uint64
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
@@ -38,8 +39,8 @@ type BatchProposer struct {
totalL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
batchChunksNum prometheus.Gauge
batchFirstBlockTimeoutReached prometheus.Counter
batchChunksProposeNotEnoughTotal prometheus.Counter
batchFirstChunkTimeoutReached prometheus.Counter
batchChunksProposeNotEnoughTotal prometheus.Counter
}
// NewBatchProposer creates a new BatchProposer instance.
@@ -53,6 +54,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch,
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
@@ -84,13 +86,13 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
Name: "bridge_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
}),
batchFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_block_timeout_reached_total",
Help: "Total times of batch's first block timeout reached",
batchFirstChunkTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_chunk_timeout_reached_total",
Help: "Total times of batch's first chunk timeout reached",
}),
batchChunksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_propose_not_enough_total",
Help: "Total number of batch chunk propose not enough",
batchChunksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_propose_not_enough_total",
Help: "Total number of batch chunk propose not enough",
}),
}
}
@@ -151,78 +153,87 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
return nil, nil
}
var totalL1CommitCalldataSize uint32
var totalL1CommitGas uint64
var totalChunks uint64
var totalL1MessagePopped uint64
firstChunk := dbChunks[0]
totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
totalL1CommitGas := firstChunk.TotalL1CommitGas
totalChunks := uint64(1)
totalL1MessagePopped := firstChunk.TotalL1MessagesPoppedBefore + uint64(firstChunk.TotalL1MessagesPoppedInChunk)
parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
if err != nil {
return nil, err
}
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
// Add extra gas costs
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += types.CalldataNonZeroByteGas // version in calldata
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 16 // version in calldata
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256) // _skippedL1MessageBitmap in calldata
// adjusting gas:
// add 1 time cold sload (2100 gas) for L1MessageQueue
// add 1 time cold address access (2600 gas) for L1MessageQueue
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
totalL1CommitGas += (2100 + 2600 - 100 - 100)
totalL1CommitGas += getKeccakGas(32 * totalChunks) // batch data hash
if parentBatch != nil {
totalL1CommitGas += types.GetKeccak256Gas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += types.CalldataNonZeroByteGas * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
totalL1CommitGas += getKeccakGas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += 16 * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
}
// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
for i, chunk := range dbChunks {
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
for i, chunk := range dbChunks[1:] {
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
totalL1CommitGas += chunk.TotalL1CommitGas
// adjust batch data hash gas cost
totalL1CommitGas -= types.GetKeccak256Gas(32 * totalChunks)
totalL1CommitGas -= getKeccakGas(32 * totalChunks)
totalChunks++
totalL1CommitGas += types.GetKeccak256Gas(32 * totalChunks)
// adjust batch header hash gas cost, batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas -= types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += getKeccakGas(32 * totalChunks)
// adjust batch header hash gas cost
totalL1CommitGas -= getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
totalL1CommitGas += types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
if totalChunks > p.maxChunkNumPerBatch ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if i == 0 {
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
}
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks[:i], nil
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return dbChunks[:i+1], nil
}
}
p.batchChunksNum.Set(float64(len(dbChunks)))
var hasChunkTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
@@ -230,16 +241,18 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
"first block timestamp", dbChunks[0].StartBlockTime,
"chunk outdated time threshold", currentTimeSec,
)
p.batchFirstBlockTimeoutReached.Inc()
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks, nil
hasChunkTimeout = true
p.batchFirstChunkTimeoutReached.Inc()
}
log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
p.batchChunksProposeNotEnoughTotal.Inc()
return nil, nil
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
log.Warn("The chunk number of the batch is less than the minimum limit",
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
)
p.batchChunksProposeNotEnoughTotal.Inc()
return nil, nil
}
return dbChunks, nil
}
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
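To make the gas bookkeeping in this hunk concrete, here is a small self-contained sketch of the keccak and skipped-message-bitmap terms that the loop adds back and subtracts as chunks are appended. The inputs (1 or 5 chunks, 300 popped L1 messages) are made up for illustration.

```go
package main

import "fmt"

// keccakGas mirrors the inline helper above: 30 gas base plus 6 gas per 32-byte word.
func keccakGas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

func main() {
	// Batch data hash: one 32-byte chunk hash per chunk, so the term grows with totalChunks.
	fmt.Println(keccakGas(32*1), keccakGas(32*5)) // 36 60

	// Batch header: 89 bytes plus one 32-byte bitmap word per 256 popped L1 messages.
	popped := uint64(300)
	headerSize := 89 + 32*(popped+255)/256
	fmt.Println(headerSize, keccakGas(headerSize)) // 153 60

	// Calldata cost of the skipped-message bitmap, charging 16 gas per byte.
	fmt.Println(16 * (32 * (popped + 255) / 256)) // 1024
}
```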

View File

@@ -23,9 +23,11 @@ func testBatchProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -35,6 +37,7 @@ func testBatchProposer(t *testing.T) {
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
bp.TryProposeBatch()

View File

@@ -51,9 +51,11 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxTxGasPerChunk uint64
maxL2TxNumPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
minL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
@@ -68,8 +70,8 @@ type ChunkProposer struct {
totalTxGasUsed prometheus.Gauge
maxTxConsumption prometheus.Gauge
chunkBlocksNum prometheus.Gauge
chunkFirstBlockTimeoutReached prometheus.Counter
chunkBlocksProposeNotEnoughTotal prometheus.Counter
chunkBlockTimeoutReached prometheus.Counter
chunkBlocksProposeNotEnoughTotal prometheus.Counter
}
// NewChunkProposer creates a new ChunkProposer instance.
@@ -79,9 +81,11 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxTxGasPerChunk: cfg.MaxTxGasPerChunk,
maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
@@ -126,13 +130,13 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
Name: "bridge_propose_chunk_chunk_block_number",
Help: "The number of blocks in the chunk",
}),
chunkFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
chunkBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
Help: "Total times of chunk's first block timeout reached",
}),
chunkBlocksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_propose_not_enough_total",
Help: "Total number of chunk block propose not enough",
chunkBlocksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_propose_not_enough_total",
Help: "Total number of chunk block propose not enough",
}),
}
}
@@ -184,80 +188,97 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
return nil, nil
}
var chunk types.Chunk
var totalTxGasUsed uint64
var totalL2TxNum uint64
var totalL1CommitCalldataSize uint64
var totalL1CommitGas uint64
chunk := &types.Chunk{Blocks: blocks[:1]}
firstBlock := chunk.Blocks[0]
totalTxGasUsed := firstBlock.Header.GasUsed
totalL2TxNum := firstBlock.L2TxsNum()
totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
crc := chunkRowConsumption{}
totalL1CommitGas := chunk.EstimateL1CommitGas()
for i, block := range blocks {
if err := crc.add(firstBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
p.chunkL2TxNum.Set(float64(totalL2TxNum))
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
firstBlock.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
firstBlock.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
firstBlock.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
// Check if the first block breaks any soft limits.
if totalTxGasUsed > p.maxTxGasPerChunk {
log.Warn(
"The first block in chunk exceeds l2 tx gas limit",
"block number", firstBlock.Header.Number,
"gas used", totalTxGasUsed,
"max gas limit", p.maxTxGasPerChunk,
)
}
max := crc.max()
p.maxTxConsumption.Set(float64(max))
if max > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
firstBlock.Header.Number,
crc,
max,
p.maxRowConsumptionPerChunk,
)
}
for _, block := range blocks[1:] {
chunk.Blocks = append(chunk.Blocks, block)
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas = chunk.EstimateL1CommitGas()
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if err := crc.add(block.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
crcMax := crc.max()
if totalL2TxNum > p.maxL2TxNumPerChunk ||
if totalTxGasUsed > p.maxTxGasPerChunk ||
totalL2TxNum > p.maxL2TxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
crcMax > p.maxRowConsumptionPerChunk {
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if i == 0 {
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
block.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
block.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
block.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
if crcMax > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
block.Header.Number,
crc,
crcMax,
p.maxRowConsumptionPerChunk,
)
}
}
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crcMax))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) ||
crc.max() > p.maxRowConsumptionPerChunk {
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
break
}
chunk.Blocks = append(chunk.Blocks, block)
}
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
var hasBlockTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
@@ -265,17 +286,17 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"block timestamp", blocks[0].Header.Time,
"block outdated time threshold", currentTimeSec,
)
p.chunkFirstBlockTimeoutReached.Inc()
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crc.max()))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
p.chunkBlockTimeoutReached.Inc()
hasBlockTimeout = true
}
log.Debug("pending blocks do not reach one of the constraints or contain a timeout block")
p.chunkBlocksProposeNotEnoughTotal.Inc()
return nil, nil
if !hasBlockTimeout && totalL1CommitCalldataSize < p.minL1CommitCalldataSizePerChunk {
log.Warn("The calldata size of the chunk is less than the minimum limit",
"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
)
p.chunkBlocksProposeNotEnoughTotal.Inc()
return nil, nil
}
return chunk, nil
}
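The rewritten loop follows an append, re-check, roll-back pattern: each block is appended, the running totals are re-checked, and the block that breaks a limit is removed again before the loop exits. A minimal single-constraint sketch of that pattern (not the proposer code itself):

```go
package main

import "fmt"

// accumulate shows the append, re-check, roll-back pattern of the new loop with a
// single constraint; the real proposer tracks gas, calldata size, tx count and row
// consumption in parallel, but the shape is the same.
func accumulate(weights []uint64, limit uint64) []uint64 {
	if len(weights) == 0 {
		return nil
	}
	selected := []uint64{weights[0]}
	total := weights[0]
	for _, w := range weights[1:] {
		selected = append(selected, w)
		total += w
		if total > limit {
			selected = selected[:len(selected)-1] // drop the block that broke the limit
			break
		}
	}
	return selected
}

func main() {
	fmt.Println(accumulate([]uint64{10, 20, 30, 40}, 65)) // [10 20 30]
}
```

Rolling back before breaking keeps every element of the returned prefix under the cap without a second pass; the first block is taken unconditionally, which is why the hard-limit checks on the first block sit before the loop.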

View File

@@ -23,9 +23,11 @@ func testChunkProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -53,9 +55,11 @@ func testChunkProposerRowConsumption(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 0, // !
ChunkTimeoutSec: 300,
}, db, nil)

View File

@@ -58,9 +58,11 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NoError(t, err)
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
@@ -75,6 +77,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
bp.TryProposeBatch()

View File

@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"ark-std",
"env_logger 0.10.0",
@@ -433,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"eth-types",
"ethers-core",
@@ -1049,7 +1049,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1226,7 +1226,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"eth-types",
"geth-utils",
@@ -1439,7 +1439,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1479,7 +1479,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -1595,6 +1595,21 @@ dependencies = [
"rustc-hash",
]
[[package]]
name = "halo2-base"
version = "0.2.2"
source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
"ff",
"halo2_proofs",
"itertools",
"num-bigint",
"num-integer",
"num-traits",
"rand_chacha",
"rustc-hash",
]
[[package]]
name = "halo2-ecc"
version = "0.2.2"
@@ -1602,7 +1617,26 @@ source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74
dependencies = [
"ff",
"group",
"halo2-base",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"itertools",
"num-bigint",
"num-integer",
"num-traits",
"rand",
"rand_chacha",
"rand_core",
"serde",
"serde_json",
]
[[package]]
name = "halo2-ecc"
version = "0.2.2"
source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
"ff",
"group",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"itertools",
"num-bigint",
"num-integer",
@@ -1633,7 +1667,7 @@ dependencies = [
[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?tag=v0.5.1#2163a9c436ed85363c954ecf7e6e1044a1b991dc"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=v0.5#2163a9c436ed85363c954ecf7e6e1044a1b991dc"
dependencies = [
"ethers-core",
"halo2_proofs",
@@ -1655,7 +1689,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#19de67c07a9b9b567580466763f93ebfbc3bb799"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#b612b1e2a9fa2ccd150a6cb99e67592c8d62cd99"
dependencies = [
"ark-std",
"blake2b_simd",
@@ -2077,7 +2111,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2264,7 +2298,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"eth-types",
"ethers-core",
@@ -2279,7 +2313,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2754,8 +2788,8 @@ dependencies = [
[[package]]
name = "prover"
version = "0.6.4"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.5#ccb3cd457080dcfdf8ae59416eb47ae9b6b6ddd6"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.2#8c439b1dd62c429223221484fb8a5470242d1cbc"
dependencies = [
"aggregator",
"anyhow",
@@ -3628,8 +3662,8 @@ source = "git+https://github.com/scroll-tech//snark-verifier?tag=v0.1.1#11a09d4a
dependencies = [
"bytes",
"ethereum-types 0.14.1",
"halo2-base",
"halo2-ecc",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"halo2-ecc 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"hex",
"itertools",
"lazy_static",
@@ -3653,7 +3687,7 @@ dependencies = [
"bincode",
"env_logger 0.10.0",
"ethereum-types 0.14.1",
"halo2-base",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0)",
"hex",
"itertools",
"lazy_static",
@@ -4039,8 +4073,8 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.6.4"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.5#ccb3cd457080dcfdf8ae59416eb47ae9b6b6ddd6"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.6.2#8c439b1dd62c429223221484fb8a5470242d1cbc"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4491,7 +4525,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.5#0cafc84c6dce2c0f385b36445fb3c61ca30d28ef"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.6.2#750169c0e7282c267c8216c800ed7c1b7e021b16"
dependencies = [
"array-init",
"bus-mapping",
@@ -4501,8 +4535,8 @@ dependencies = [
"ethers-core",
"ethers-signers",
"gadgets",
"halo2-base",
"halo2-ecc",
"halo2-base 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"halo2-ecc 0.2.2 (git+https://github.com/scroll-tech/halo2-lib?branch=develop)",
"halo2_proofs",
"hex",
"itertools",

View File

@@ -24,8 +24,8 @@ snark-verifier = { git = "https://github.com/scroll-tech//snark-verifier", tag =
snark-verifier-sdk = { git = "https://github.com/scroll-tech//snark-verifier", tag = "v0.1.1" }
[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.5" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.5" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.2" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.6.2" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
log = "0.4"

View File

@@ -10,14 +10,6 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
)
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
const CalldataNonZeroByteGas = 16
// GetKeccak256Gas calculates keccak256 hash gas.
func GetKeccak256Gas(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
@@ -102,6 +94,10 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
// EstimateL1CommitGas calculates the total L1 commit gas for this block approximately.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
var total uint64
var numL1Messages uint64
for _, txData := range w.Transactions {
@@ -124,9 +120,9 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
})
rlpTxData, _ := tx.MarshalBinary()
txPayloadLength := uint64(len(rlpTxData))
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
total += CalldataNonZeroByteGas * 4 // size of a uint32 field
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
total += 16 * txPayloadLength // an over-estimate: treat each byte as non-zero
total += 16 * 4 // size of a uint32 field
total += getKeccakGas(txPayloadLength) // l2 tx hash
}
// sload
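As a rough sanity check of the per-transaction terms above, assuming a hypothetical 250-byte RLP-encoded payload:

```go
package main

import "fmt"

func main() {
	// Per-transaction terms from the estimate above, assuming a hypothetical
	// 250-byte RLP payload. Every calldata byte is charged the non-zero rate
	// (16 gas) as a deliberate over-estimate.
	txPayloadLength := uint64(250)

	calldata := 16 * txPayloadLength           // payload bytes in calldata
	lengthField := uint64(16 * 4)              // the uint32 payload-length field
	txHash := 30 + 6*((txPayloadLength+31)/32) // keccak256 of the payload (l2 tx hash)

	fmt.Println(calldata, lengthField, txHash, calldata+lengthField+txHash) // 4000 64 78 4142
}
```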

View File

@@ -146,10 +146,14 @@ func (c *Chunk) EstimateL1CommitGas() uint64 {
}
numBlocks := uint64(len(c.Blocks))
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += 16 // numBlocks field of chunk encoding in calldata
totalL1CommitGas += 16 * 60 * numBlocks // BlockContext in chunk
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
totalL1CommitGas += getKeccakGas(58*numBlocks + 32*totalTxNum) // chunk hash
return totalL1CommitGas
}
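And the chunk-level terms, for an assumed chunk of 3 blocks carrying 40 transactions in total:

```go
package main

import "fmt"

func main() {
	// Chunk-level terms from the estimate above, for an assumed chunk of
	// 3 blocks carrying 40 transactions in total.
	numBlocks, totalTxNum := uint64(3), uint64(40)

	gas := 100 * numBlocks     // one warm sload per block
	gas += 16                  // numBlocks field of the chunk encoding
	gas += 16 * 60 * numBlocks // 60-byte BlockContext per block in calldata

	hashInput := 58*numBlocks + 32*totalTxNum // chunk hash preimage size
	gas += 30 + 6*((hashInput+31)/32)         // chunk hash

	fmt.Println(hashInput, gas) // 1454 3502
}
```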

View File

@@ -6,7 +6,7 @@ import (
"strings"
)
var tag = "v4.1.80"
var tag = "v4.1.72"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -44,5 +44,5 @@ func CheckScrollProverVersion(proverVersion string) bool {
return false
}
// compare the `scroll_prover` version
return remote[2] == local[2] || remote[2] == "8c439b1"
return remote[2] == local[2]
}
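For reference, a standalone sketch of the plain `remote[2] == local[2]` comparison; the version-string layout (at least three dash-separated components, with the scroll_prover component at index 2) is an assumption inferred from the code above, and the sample strings are made up:

```go
package main

import (
	"fmt"
	"strings"
)

// checkScrollProverVersion sketches the comparison shown above: both version strings
// are split on "-" and only the third component (assumed here to be the scroll_prover
// part) has to match.
func checkScrollProverVersion(local, remote string) bool {
	l, r := strings.Split(local, "-"), strings.Split(remote, "-")
	if len(l) < 3 || len(r) < 3 {
		return false
	}
	return r[2] == l[2]
}

func main() {
	fmt.Println(checkScrollProverVersion("v4.1.72-abc1234-8c439b1", "v4.1.70-def5678-8c439b1")) // true
	fmt.Println(checkScrollProverVersion("v4.1.72-abc1234-8c439b1", "v4.1.72-abc1234-1111111")) // false
}
```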

View File

@@ -260,23 +260,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -371,22 +354,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -563,23 +530,6 @@ Emitted when some ERC1155 token is refunded.
| tokenId | uint256 | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -227,23 +227,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -316,22 +299,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -502,23 +469,6 @@ Emitted when some ERC721 token is refunded.
| recipient `indexed` | address | undefined |
| tokenId | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -239,23 +239,6 @@ Mapping from queue index to previous replay queue index.
|---|---|---|
| _0 | uint256 | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of ETH rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### relayMessageWithProof
```solidity
@@ -453,22 +436,6 @@ Update max replay times.
|---|---|---|
| _newMaxReplayTimes | uint256 | The new max replay times. |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### xDomainMessageSender
```solidity
@@ -642,22 +609,5 @@ Emitted when the maximum number of times each message can be replayed is updated
| oldMaxReplayTimes | uint256 | undefined |
| newMaxReplayTimes | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -225,23 +225,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -292,22 +275,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
## Events
@@ -405,22 +372,5 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -223,23 +223,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -290,22 +273,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
## Events
@@ -403,22 +370,5 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -205,23 +205,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -316,22 +299,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -488,23 +455,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -174,23 +174,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -263,22 +246,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -430,23 +397,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity

View File

@@ -194,23 +194,6 @@ function paused() external view returns (bool)
|---|---|---|
| _0 | bool | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of ETH rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### relayMessage
```solidity
@@ -345,22 +328,6 @@ Update max failed execution times.
|---|---|---|
| _newMaxFailedExecutionTimes | uint256 | The new max failed execution times. |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### xDomainMessageSender
```solidity
@@ -534,22 +501,5 @@ Emitted when the maximum number of times each message can fail in L2 is updated.
| oldMaxFailedExecutionTimes | uint256 | undefined |
| newMaxFailedExecutionTimes | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -139,23 +139,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -223,22 +206,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### withdrawERC20
```solidity
@@ -354,23 +321,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### WithdrawERC20
```solidity

View File

@@ -172,23 +172,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -239,22 +222,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only called by contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### withdrawERC20
```solidity
@@ -370,23 +337,6 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### WithdrawERC20
```solidity

View File

@@ -149,7 +149,7 @@ contract L1ScrollMessenger is ScrollMessengerBase, IL1ScrollMessenger {
// @note check more `_to` address to avoid attack in the future when we add more gateways.
require(_to != messageQueue, "Forbid to call message queue");
_validateTargetAddress(_to);
require(_to != address(this), "Forbid to call self");
// @note This usually will never happen, just in case.
require(_from != xDomainMessageSender, "Invalid message sender");
@@ -312,8 +312,6 @@ contract L1ScrollMessenger is ScrollMessengerBase, IL1ScrollMessenger {
uint256 _gasLimit,
address _refundAddress
) internal nonReentrant {
_addUsedAmount(_value);
address _messageQueue = messageQueue; // gas saving
address _counterpart = counterpart; // gas saving

View File

@@ -161,9 +161,6 @@ abstract contract L1ERC20Gateway is IL1ERC20Gateway, IMessageDropCallback, Scrol
// ignore weird fee on transfer token
require(_amount > 0, "deposit zero amount");
// rate limit
_addUsedAmount(_token, _amount);
return (_from, _amount, _data);
}

View File

@@ -124,8 +124,6 @@ contract L1ETHGateway is ScrollGatewayBase, IL1ETHGateway, IMessageDropCallback
(_from, _data) = abi.decode(_data, (address, bytes));
}
// @note no rate limit here, since ETH is limited in messenger
// 2. Generate message passed to L1ScrollMessenger.
bytes memory _message = abi.encodeCall(IL2ETHGateway.finalizeDepositETH, (_from, _to, _amount, _data));

View File

@@ -137,7 +137,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
uint256 _gasLimit
) internal nonReentrant {
require(msg.value == _value, "msg.value mismatch");
_addUsedAmount(_value);
uint256 _nonce = L2MessageQueue(messageQueue).nextMessageIndex();
bytes32 _xDomainCalldataHash = keccak256(_encodeXDomainCalldata(msg.sender, _to, _value, _nonce, _message));
@@ -166,7 +165,7 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
) internal {
// @note check more `_to` address to avoid attack in the future when we add more gateways.
require(_to != messageQueue, "Forbid to call message queue");
_validateTargetAddress(_to);
require(_to != address(this), "Forbid to call self");
// @note This usually will never happen, just in case.
require(_from != xDomainMessageSender, "Invalid message sender");

View File

@@ -126,9 +126,6 @@ contract L2CustomERC20Gateway is L2ERC20Gateway {
(_from, _data) = abi.decode(_data, (address, bytes));
}
// rate limit
_addUsedAmount(_token, _amount);
// 2. Burn token.
IScrollERC20Upgradeable(_token).burn(_from, _amount);

View File

@@ -98,8 +98,6 @@ contract L2ETHGateway is ScrollGatewayBase, IL2ETHGateway {
(_from, _data) = abi.decode(_data, (address, bytes));
}
// @note no rate limit here, since ETH is limited in messenger
bytes memory _message = abi.encodeCall(IL1ETHGateway.finalizeWithdrawETH, (_from, _to, _amount, _data));
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, _amount, _message, _gasLimit);

View File

@@ -140,9 +140,6 @@ contract L2StandardERC20Gateway is L2ERC20Gateway {
address _l1Token = tokenMapping[_token];
require(_l1Token != address(0), "no corresponding l1 token");
// rate limit
_addUsedAmount(_token, _amount);
// 2. Burn token.
IScrollERC20Upgradeable(_token).burn(_from, _amount);

View File

@@ -116,9 +116,6 @@ contract L2WETHGateway is L2ERC20Gateway {
(_from, _data) = abi.decode(_data, (address, bytes));
}
// rate limit
_addUsedAmount(_token, _amount);
// 2. Transfer token into this contract.
IERC20Upgradeable(_token).safeTransferFrom(_from, address(this), _amount);
IWETH(_token).withdraw(_amount);

View File

@@ -7,7 +7,6 @@ import {PausableUpgradeable} from "@openzeppelin/contracts-upgradeable/security/
import {ReentrancyGuardUpgradeable} from "@openzeppelin/contracts-upgradeable/security/ReentrancyGuardUpgradeable.sol";
import {ScrollConstants} from "./constants/ScrollConstants.sol";
import {IETHRateLimiter} from "../rate-limiter/IETHRateLimiter.sol";
import {IScrollMessenger} from "./IScrollMessenger.sol";
// solhint-disable var-name-mixedcase
@@ -27,11 +26,6 @@ abstract contract ScrollMessengerBase is
/// @param _newFeeVault The address of new fee vault contract.
event UpdateFeeVault(address _oldFeeVault, address _newFeeVault);
/// @notice Emitted when owner updates rate limiter contract.
/// @param _oldRateLimiter The address of old rate limiter contract.
/// @param _newRateLimiter The address of new rate limiter contract.
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter);
/*************
* Variables *
*************/
@@ -45,11 +39,8 @@ abstract contract ScrollMessengerBase is
/// @notice The address of fee vault, collecting cross domain messaging fee.
address public feeVault;
/// @notice The address of ETH rate limiter contract.
address public rateLimiter;
/// @dev The storage slots for future usage.
uint256[46] private __gap;
uint256[47] private __gap;
/**********************
* Function Modifiers *
@@ -98,16 +89,6 @@ abstract contract ScrollMessengerBase is
emit UpdateFeeVault(_oldFeeVault, _newFeeVault);
}
/// @notice Update rate limiter contract.
/// @dev This function can only called by contract owner.
/// @param _newRateLimiter The address of new rate limiter contract.
function updateRateLimiter(address _newRateLimiter) external onlyOwner {
address _oldRateLimiter = rateLimiter;
rateLimiter = _newRateLimiter;
emit UpdateRateLimiter(_oldRateLimiter, _newRateLimiter);
}
/// @notice Pause the contract
/// @dev This function can only called by contract owner.
/// @param _status The pause status to update.
@@ -147,27 +128,4 @@ abstract contract ScrollMessengerBase is
_message
);
}
/// @dev Internal function to increase ETH usage for the given `_sender`.
/// @param _amount The amount of ETH used.
function _addUsedAmount(uint256 _amount) internal {
if (_amount == 0) return;
address _rateLimiter = rateLimiter;
if (_rateLimiter != address(0)) {
IETHRateLimiter(_rateLimiter).addUsedAmount(_amount);
}
}
/// @dev Internal function to check whether the `_target` address is allowed to avoid attack.
/// @param _target The address of target address to check.
function _validateTargetAddress(address _target) internal view {
// @note check more `_target` address to avoid attack in the future when we add more external contracts.
address _rateLimiter = rateLimiter;
if (_rateLimiter != address(0)) {
require(_target != _rateLimiter, "Forbid to call rate limiter");
}
require(_target != address(this), "Forbid to call self");
}
}

View File

@@ -9,18 +9,8 @@ import {IScrollGateway} from "./IScrollGateway.sol";
import {IScrollMessenger} from "../IScrollMessenger.sol";
import {IScrollGatewayCallback} from "../callbacks/IScrollGatewayCallback.sol";
import {ScrollConstants} from "../constants/ScrollConstants.sol";
import {ITokenRateLimiter} from "../../rate-limiter/ITokenRateLimiter.sol";
abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgradeable, IScrollGateway {
/**********
* Events *
**********/
/// @notice Emitted when owner updates rate limiter contract.
/// @param _oldRateLimiter The address of old rate limiter contract.
/// @param _newRateLimiter The address of new rate limiter contract.
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter);
/*************
* Variables *
*************/
@@ -34,11 +24,8 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
/// @inheritdoc IScrollGateway
address public override messenger;
/// @notice The address of token rate limiter contract.
address public rateLimiter;
/// @dev The storage slots for future usage.
uint256[46] private __gap;
uint256[47] private __gap;
/**********************
* Function Modifiers *
@@ -85,20 +72,6 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
}
}
/************************
* Restricted Functions *
************************/
/// @notice Update rate limiter contract.
/// @dev This function can only called by contract owner.
/// @param _newRateLimiter The address of new rate limiter contract.
function updateRateLimiter(address _newRateLimiter) external onlyOwner {
address _oldRateLimiter = rateLimiter;
rateLimiter = _newRateLimiter;
emit UpdateRateLimiter(_oldRateLimiter, _newRateLimiter);
}
/**********************
* Internal Functions *
**********************/
@@ -111,16 +84,4 @@ abstract contract ScrollGatewayBase is ReentrancyGuardUpgradeable, OwnableUpgrad
IScrollGatewayCallback(_to).onScrollGatewayCallback(_data);
}
}
/// @dev Internal function to increase token usage for the given `_sender`.
/// @param _token The address of token.
/// @param _amount The amount of token used.
function _addUsedAmount(address _token, uint256 _amount) internal {
if (_amount == 0) return;
address _rateLimiter = rateLimiter;
if (_rateLimiter != address(0)) {
ITokenRateLimiter(_rateLimiter).addUsedAmount(_token, _amount);
}
}
}

View File

@@ -1,116 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {SafeCast} from "@openzeppelin/contracts/utils/math/SafeCast.sol";
import {IETHRateLimiter} from "./IETHRateLimiter.sol";
// solhint-disable func-name-mixedcase
// solhint-disable not-rely-on-time
contract ETHRateLimiter is Ownable, IETHRateLimiter {
/***********
* Structs *
***********/
struct TokenAmount {
// The timestamp when the amount is updated.
uint48 lastUpdateTs;
// The ETH limit in wei.
uint104 limit;
// The amount of ETH in current period.
uint104 amount;
}
/*************
* Constants *
*************/
/// @notice The period length in seconds.
/// @dev The time frame for the `k`-th period is `[periodDuration * k, periodDuration * (k + 1))`.
uint256 public immutable periodDuration;
/// @notice The address of ETH spender.
address public immutable spender;
/*************
* Variables *
*************/
/// @notice The token amount used in current period.
TokenAmount public currentPeriod;
/***************
* Constructor *
***************/
constructor(
uint256 _periodDuration,
address _spender,
uint104 _totalLimit
) {
if (_periodDuration == 0) {
revert PeriodIsZero();
}
if (_totalLimit == 0) {
revert TotalLimitIsZero();
}
periodDuration = _periodDuration;
spender = _spender;
currentPeriod.limit = _totalLimit;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IETHRateLimiter
function addUsedAmount(uint256 _amount) external override {
if (msg.sender != spender) {
revert CallerNotSpender();
}
if (_amount == 0) return;
uint256 _currentPeriodStart = (block.timestamp / periodDuration) * periodDuration;
// check total limit
uint256 _currentTotalAmount;
TokenAmount memory _currentPeriod = currentPeriod;
if (_currentPeriod.lastUpdateTs < _currentPeriodStart) {
_currentTotalAmount = _amount;
} else {
_currentTotalAmount = _currentPeriod.amount + _amount;
}
if (_currentTotalAmount > _currentPeriod.limit) {
revert ExceedTotalLimit();
}
_currentPeriod.lastUpdateTs = uint48(block.timestamp);
_currentPeriod.amount = SafeCast.toUint104(_currentTotalAmount);
currentPeriod = _currentPeriod;
}
/************************
* Restricted Functions *
************************/
/// @notice Update the total token amount limit.
/// @param _newTotalLimit The new total limit.
function updateTotalLimit(uint104 _newTotalLimit) external onlyOwner {
if (_newTotalLimit == 0) {
revert TotalLimitIsZero();
}
uint256 _oldTotalLimit = currentPeriod.limit;
currentPeriod.limit = _newTotalLimit;
emit UpdateTotalLimit(_oldTotalLimit, _newTotalLimit);
}
}
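
The deleted ETHRateLimiter above is a fixed-window limiter: every call is bucketed into the window `[periodDuration * k, periodDuration * (k + 1))`, and the stored amount is implicitly reset the first time a call lands in a new window. A minimal Go sketch of the same arithmetic (names and package layout are illustrative only, not part of the repository) may make the rollover easier to follow:

// window mirrors ETHRateLimiter's TokenAmount bookkeeping: usage is
// accumulated inside fixed windows of periodDuration seconds and the
// counter is implicitly reset the first time a call lands in a new window.
package main

import (
	"errors"
	"fmt"
	"time"
)

type window struct {
	lastUpdate int64  // unix seconds of the last recorded usage
	limit      uint64 // maximum usage per window (wei in the contract)
	amount     uint64 // usage recorded in the current window
}

var errExceedTotalLimit = errors.New("exceed total limit")

func addUsedAmount(w *window, periodDuration, now int64, amount uint64) error {
	if amount == 0 {
		return nil
	}
	periodStart := (now / periodDuration) * periodDuration
	total := amount
	if w.lastUpdate >= periodStart { // still inside the current window
		total = w.amount + amount
	}
	if total > w.limit {
		return errExceedTotalLimit
	}
	w.lastUpdate = now
	w.amount = total
	return nil
}

func main() {
	w := &window{limit: 100}
	now := time.Now().Unix()
	fmt.Println(addUsedAmount(w, 86400, now, 60))       // <nil>
	fmt.Println(addUsedAmount(w, 86400, now, 50))       // exceed total limit
	fmt.Println(addUsedAmount(w, 86400, now+86400, 50)) // <nil>: next window, counter reset
}

The third call succeeds even though 60 + 50 exceeded the limit earlier, because its timestamp falls into the next window and the counter starts from zero again.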

View File

@@ -1,38 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
interface IETHRateLimiter {
/**********
* Events *
**********/
/// @notice Emitted when the total limit is updated.
/// @param oldTotalLimit The previous value of total limit before updating.
/// @param newTotalLimit The current value of total limit after updating.
event UpdateTotalLimit(uint256 oldTotalLimit, uint256 newTotalLimit);
/**********
* Errors *
**********/
/// @dev Thrown when the `periodDuration` is initialized to zero.
error PeriodIsZero();
/// @dev Thrown when the `totalAmount` is initialized to zero.
error TotalLimitIsZero();
/// @dev Thrown when an amount breaches the total limit in the period.
error ExceedTotalLimit();
/// @dev Thrown when the caller is not the spender.
error CallerNotSpender();
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Request some ETH usage for `sender`.
/// @param _amount The amount of ETH to use.
function addUsedAmount(uint256 _amount) external;
}

View File

@@ -1,38 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
interface ITokenRateLimiter {
/**********
* Events *
**********/
/// @notice Emitted when the total limit is updated.
/// @param oldTotalLimit The previous value of total limit before updating.
/// @param newTotalLimit The current value of total limit after updating.
event UpdateTotalLimit(address indexed token, uint256 oldTotalLimit, uint256 newTotalLimit);
/**********
* Errors *
**********/
/// @dev Thrown when the `periodDuration` is initialized to zero.
error PeriodIsZero();
/// @dev Thrown when the `totalAmount` is initialized to zero.
/// @param token The address of the token.
error TotalLimitIsZero(address token);
/// @dev Thrown when an amount breaches the total limit in the period.
/// @param token The address of the token.
error ExceedTotalLimit(address token);
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Request some token usage for `sender`.
/// @param token The address of the token.
/// @param amount The amount of token to use.
function addUsedAmount(address token, uint256 amount) external;
}

View File

@@ -1,106 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {AccessControlEnumerable} from "@openzeppelin/contracts/access/AccessControlEnumerable.sol";
import {SafeCast} from "@openzeppelin/contracts/utils/math/SafeCast.sol";
import {ITokenRateLimiter} from "./ITokenRateLimiter.sol";
// solhint-disable func-name-mixedcase
// solhint-disable not-rely-on-time
contract TokenRateLimiter is AccessControlEnumerable, ITokenRateLimiter {
/***********
* Structs *
***********/
struct TokenAmount {
// The timestamp when the amount is updated.
uint48 lastUpdateTs;
// The token limit.
uint104 limit;
// The amount of token in current period.
uint104 amount;
}
/*************
* Constants *
*************/
/// @notice The role for token spender.
bytes32 public constant TOKEN_SPENDER_ROLE = keccak256("TOKEN_SPENDER_ROLE");
/// @notice The period length in seconds.
/// @dev The time frame for the `k`-th period is `[periodDuration * k, periodDuration * (k + 1))`.
uint256 public immutable periodDuration;
/*************
* Variables *
*************/
/// @notice Mapping from token address to the total amounts used in current period and total token amount limit.
mapping(address => TokenAmount) public currentPeriod;
/// @dev The storage slots for future usage.
uint256[49] private __gap;
/***************
* Constructor *
***************/
constructor(uint256 _periodDuration) {
if (_periodDuration == 0) {
revert PeriodIsZero();
}
_setupRole(DEFAULT_ADMIN_ROLE, msg.sender);
periodDuration = _periodDuration;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc ITokenRateLimiter
function addUsedAmount(address _token, uint256 _amount) external override onlyRole(TOKEN_SPENDER_ROLE) {
if (_amount == 0) return;
uint256 _currentPeriodStart = (block.timestamp / periodDuration) * periodDuration;
// check total limit, `0` means no limit at all.
uint256 _currentTotalAmount;
TokenAmount memory _currentPeriod = currentPeriod[_token];
if (_currentPeriod.lastUpdateTs < _currentPeriodStart) {
_currentTotalAmount = _amount;
} else {
_currentTotalAmount = _currentPeriod.amount + _amount;
}
if (_currentPeriod.limit != 0 && _currentTotalAmount > _currentPeriod.limit) {
revert ExceedTotalLimit(_token);
}
_currentPeriod.lastUpdateTs = uint48(block.timestamp);
_currentPeriod.amount = SafeCast.toUint104(_currentTotalAmount);
currentPeriod[_token] = _currentPeriod;
}
/************************
* Restricted Functions *
************************/
/// @notice Update the total token amount limit.
/// @param _newTotalLimit The new total limit.
function updateTotalLimit(address _token, uint104 _newTotalLimit) external onlyRole(DEFAULT_ADMIN_ROLE) {
if (_newTotalLimit == 0) {
revert TotalLimitIsZero(_token);
}
uint256 _oldTotalLimit = currentPeriod[_token].limit;
currentPeriod[_token].limit = _newTotalLimit;
emit UpdateTotalLimit(_token, _oldTotalLimit, _newTotalLimit);
}
}
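
TokenRateLimiter applies the same window arithmetic per token address, with two differences: spenders are gated by TOKEN_SPENDER_ROLE instead of a single immutable spender, and a stored limit of zero means no limit at all. A hedged Go sketch of the per-token bookkeeping, reusing the hypothetical `window` struct and `errExceedTotalLimit` error from the sketch after ETHRateLimiter above:

// tokenWindows tracks one fixed window per token, mirroring the contract's
// `mapping(address => TokenAmount) public currentPeriod`.
type tokenWindows map[string]*window

func addTokenUsage(tw tokenWindows, token string, periodDuration, now int64, amount uint64) error {
	if amount == 0 {
		return nil
	}
	w, ok := tw[token]
	if !ok {
		w = &window{} // limit stays 0 ("no limit at all") until an admin sets one
		tw[token] = w
	}
	periodStart := (now / periodDuration) * periodDuration
	total := amount
	if w.lastUpdate >= periodStart {
		total = w.amount + amount
	}
	// a zero limit disables the check; a non-zero limit caps the window total
	if w.limit != 0 && total > w.limit {
		return errExceedTotalLimit
	}
	w.lastUpdate, w.amount = now, total
	return nil
}

Note that usage is still recorded when the limit is zero, so setting a non-zero limit later starts counting from the amount already accumulated within the current window.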

View File

@@ -1,75 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
import {ETHRateLimiter} from "../rate-limiter/ETHRateLimiter.sol";
import {IETHRateLimiter} from "../rate-limiter/IETHRateLimiter.sol";
contract ETHRateLimiterTest is DSTestPlus {
event UpdateTotalLimit(uint256 oldTotalLimit, uint256 newTotalLimit);
ETHRateLimiter private limiter;
function setUp() public {
hevm.warp(86400);
limiter = new ETHRateLimiter(86400, address(this), 100 ether);
}
function testUpdateTotalLimit(uint104 _newTotalLimit) external {
hevm.assume(_newTotalLimit > 0);
// not owner, revert
hevm.startPrank(address(1));
hevm.expectRevert("Ownable: caller is not the owner");
limiter.updateTotalLimit(_newTotalLimit);
hevm.stopPrank();
// zero revert
hevm.expectRevert(IETHRateLimiter.TotalLimitIsZero.selector);
limiter.updateTotalLimit(0);
// success
hevm.expectEmit(false, false, false, true);
emit UpdateTotalLimit(100 ether, _newTotalLimit);
limiter.updateTotalLimit(_newTotalLimit);
(, uint104 _totalLimit, ) = limiter.currentPeriod();
assertEq(_totalLimit, _newTotalLimit);
}
function testAddUsedAmount() external {
// non-spender, revert
hevm.startPrank(address(1));
hevm.expectRevert(IETHRateLimiter.CallerNotSpender.selector);
limiter.addUsedAmount(0);
hevm.stopPrank();
// exceed total limit on first call
hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
limiter.addUsedAmount(100 ether + 1);
_checkTotalCurrentPeriodAmountAmount(0);
// exceed total limit on second call
limiter.addUsedAmount(50 ether);
_checkTotalCurrentPeriodAmountAmount(50 ether);
hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
limiter.addUsedAmount(50 ether + 1);
_checkTotalCurrentPeriodAmountAmount(50 ether);
// one period passed
hevm.warp(86400 * 2);
limiter.addUsedAmount(1 ether);
_checkTotalCurrentPeriodAmountAmount(1 ether);
// exceed
hevm.expectRevert(IETHRateLimiter.ExceedTotalLimit.selector);
limiter.addUsedAmount(99 ether + 1);
_checkTotalCurrentPeriodAmountAmount(1 ether);
}
function _checkTotalCurrentPeriodAmountAmount(uint256 expected) internal {
(, , uint256 totalAmount) = limiter.currentPeriod();
assertEq(totalAmount, expected);
}
}

View File

@@ -42,26 +42,6 @@ contract L1ScrollMessengerTest is L1GatewayTestBase {
l1Messenger.relayMessageWithProof(address(this), address(messageQueue), 0, 0, new bytes(0), proof);
}
function testForbidCallRateLimiterFromL2() external {
l1Messenger.updateRateLimiter(address(1));
bytes32 _xDomainCalldataHash = keccak256(
abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(this),
address(1),
0,
0,
new bytes(0)
)
);
prepareL2MessageRoot(_xDomainCalldataHash);
IL1ScrollMessenger.L2MessageProof memory proof;
proof.batchIndex = rollup.lastFinalizedBatchIndex();
hevm.expectRevert("Forbid to call rate limiter");
l1Messenger.relayMessageWithProof(address(this), address(1), 0, 0, new bytes(0), proof);
}
function testForbidCallSelfFromL2() external {
bytes32 _xDomainCalldataHash = keccak256(
abi.encodeWithSignature(

View File

@@ -51,15 +51,10 @@ contract L2ScrollMessengerTest is DSTestPlus {
}
function testForbidCallFromL1() external {
l2Messenger.updateRateLimiter(address(1));
hevm.startPrank(AddressAliasHelper.applyL1ToL2Alias(address(l1Messenger)));
hevm.expectRevert("Forbid to call message queue");
l2Messenger.relayMessage(address(this), address(l2MessageQueue), 0, 0, new bytes(0));
hevm.expectRevert("Forbid to call rate limiter");
l2Messenger.relayMessage(address(this), address(1), 0, 0, new bytes(0));
hevm.expectRevert("Forbid to call self");
l2Messenger.relayMessage(address(this), address(l2Messenger), 0, 0, new bytes(0));
hevm.stopPrank();

View File

@@ -1,82 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
import {TokenRateLimiter} from "../rate-limiter/TokenRateLimiter.sol";
import {ITokenRateLimiter} from "../rate-limiter/ITokenRateLimiter.sol";
contract TokenRateLimiterTest is DSTestPlus {
event UpdateTotalLimit(address indexed token, uint256 oldTotalLimit, uint256 newTotalLimit);
TokenRateLimiter private limiter;
function setUp() public {
hevm.warp(86400);
limiter = new TokenRateLimiter(86400);
}
function testUpdateTotalLimit(address _token, uint104 _newTotalLimit) external {
hevm.assume(_newTotalLimit > 0);
// not admin, revert
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x0000000000000000000000000000000000000000000000000000000000000000"
);
limiter.updateTotalLimit(_token, _newTotalLimit);
hevm.stopPrank();
// zero revert
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.TotalLimitIsZero.selector, _token));
limiter.updateTotalLimit(_token, 0);
// success
hevm.expectEmit(true, false, false, true);
emit UpdateTotalLimit(_token, 0 ether, _newTotalLimit);
limiter.updateTotalLimit(_token, _newTotalLimit);
(, uint104 _totalLimit, ) = limiter.currentPeriod(_token);
assertEq(_totalLimit, _newTotalLimit);
}
function testAddUsedAmount(address _token) external {
// non-spender, revert
hevm.startPrank(address(1));
hevm.expectRevert(
"AccessControl: account 0x0000000000000000000000000000000000000001 is missing role 0x267f05081a073059ae452e6ac77ec189636e43e41051d4c3ec760734b3d173cb"
);
limiter.addUsedAmount(_token, 0);
hevm.stopPrank();
limiter.grantRole(bytes32(0x267f05081a073059ae452e6ac77ec189636e43e41051d4c3ec760734b3d173cb), address(this));
limiter.updateTotalLimit(_token, 100 ether);
// exceed total limit on first call
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
limiter.addUsedAmount(_token, 100 ether + 1);
_checkTotalCurrentPeriodAmountAmount(_token, 0);
// exceed total limit on second call
limiter.addUsedAmount(_token, 50 ether);
_checkTotalCurrentPeriodAmountAmount(_token, 50 ether);
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
limiter.addUsedAmount(_token, 50 ether + 1);
_checkTotalCurrentPeriodAmountAmount(_token, 50 ether);
// one period passed
hevm.warp(86400 * 2);
limiter.addUsedAmount(_token, 1 ether);
_checkTotalCurrentPeriodAmountAmount(_token, 1 ether);
// exceed
hevm.expectRevert(abi.encodeWithSelector(ITokenRateLimiter.ExceedTotalLimit.selector, _token));
limiter.addUsedAmount(_token, 99 ether + 1);
_checkTotalCurrentPeriodAmountAmount(_token, 1 ether);
}
function _checkTotalCurrentPeriodAmountAmount(address token, uint256 expected) internal {
(, , uint256 totalAmount) = limiter.currentPeriod(token);
assertEq(totalAmount, expected);
}
}
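
The bytes32 literal granted in testAddUsedAmount above should be keccak256("TOKEN_SPENDER_ROLE"), i.e. the TOKEN_SPENDER_ROLE constant from the deleted TokenRateLimiter contract. A short Go check of that assumption (using golang.org/x/crypto/sha3 for Keccak-256):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	h := sha3.NewLegacyKeccak256() // Keccak-256, as used by Solidity's keccak256
	h.Write([]byte("TOKEN_SPENDER_ROLE"))
	fmt.Printf("0x%x\n", h.Sum(nil)) // expected to print the role hash hard-coded in the test
}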

View File

@@ -69,7 +69,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, fmt.Errorf("get prover version from context failed")
}
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", proverVersion.(string), version.Version)
}
isAssigned, err := bp.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))

View File

@@ -128,7 +128,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
return fmt.Errorf("get public key from context failed")
}
pv := ctx.GetString(coordinatorType.ProverVersion)
if len(pv) == 0 {
if len(pk) == 0 {
return fmt.Errorf("get ProverVersion from context failed")
}
@@ -148,7 +148,8 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
return err
}
m.verifierTotal.WithLabelValues(pv).Inc()
proverVersion := ctx.GetString(coordinatorType.ProverVersion)
m.verifierTotal.WithLabelValues(proverVersion).Inc()
var success bool
var verifyErr error
@@ -159,7 +160,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
}
if verifyErr != nil || !success {
m.verifierFailureTotal.WithLabelValues(pv).Inc()
m.verifierFailureTotal.WithLabelValues(proverVersion).Inc()
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
@@ -309,8 +310,8 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
}
// if the block batch already has a verified proof, a failed status should not update the block batch proving status
if m.checkIsTaskSuccess(ctx, hash, proofMsg.Type) {
log.Info("update proof status skip because this chunk / batch has been verified", "hash", hash, "public key", proverPublicKey)
if status == types.ProvingTaskFailed && m.checkIsTaskSuccess(ctx, hash, proofMsg.Type) {
log.Info("update proof status ProvingTaskFailed skip because other prover have prove success", "hash", hash, "public key", proverPublicKey)
return nil
}

View File

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
assert.Equal(t, 8, int(cur))
assert.Equal(t, 7, int(cur))
}
func testMigrate(t *testing.T) {

View File

@@ -37,11 +37,8 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
SetBaseURL(cfg.BaseURL).
AddRetryCondition(func(r *resty.Response, _ error) bool {
// Check for HTTP 5xx errors, e.g., coordinator is restarting.
if r.StatusCode() >= http.StatusInternalServerError {
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
return true
}
return false
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
return r.StatusCode() >= http.StatusInternalServerError
})
log.Info("successfully initialized prover client",
@@ -179,24 +176,23 @@ func (c *CoordinatorClient) SubmitProof(ctx context.Context, req *SubmitProofReq
if err != nil {
log.Error("submit proof request failed: %v", err)
return fmt.Errorf("submit proof request failed: %w", ErrCoordinatorConnect)
return fmt.Errorf("submit proof request failed: %w", ConnectErr)
}
if resp.StatusCode() != 200 {
log.Error("failed to submit proof, status code: %v", resp.StatusCode())
return fmt.Errorf("failed to submit proof, status code not 200: %w", ErrCoordinatorConnect)
return fmt.Errorf("failed to submit proof, status code not 200: %w", ConnectErr)
}
if result.ErrCode == types.ErrJWTTokenExpired {
log.Info("JWT expired, attempting to re-login")
if err := c.Login(ctx); err != nil {
log.Error("JWT expired, re-login failed: %v", err)
return fmt.Errorf("JWT expired, re-login failed: %w", ErrCoordinatorConnect)
return fmt.Errorf("JWT expired, re-login failed: %w", ConnectErr)
}
log.Info("re-login success")
return c.SubmitProof(ctx, req)
}
if result.ErrCode != types.Success {
return fmt.Errorf("error code: %v, error message: %v", result.ErrCode, result.ErrMsg)
}
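
SubmitProof wraps every transport-level failure with the client package's coordinator-connection sentinel via %w (defined in the types.go hunk just below), and prover.go later classifies those failures with errors.Is after an explicit errors.Unwrap. A minimal, self-contained Go sketch of that pattern, with a hypothetical local sentinel name:

// errConnect is a local stand-in for the client package's coordinator-connection sentinel.
package main

import (
	"errors"
	"fmt"
)

var errConnect = errors.New("connect coordinator error")

func submitProof(fail bool) error {
	if fail {
		// wrap the sentinel with %w so callers can classify the failure
		return fmt.Errorf("submit proof request failed: %w", errConnect)
	}
	return nil
}

func main() {
	err := submitProof(true)
	fmt.Println(errors.Is(err, errConnect))                // true: errors.Is walks the %w chain itself
	fmt.Println(errors.Is(errors.Unwrap(err), errConnect)) // true: one explicit unwrap first, as prover.go does
}

Since errors.Is already traverses the %w chain, both checks report true for a single level of wrapping.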

View File

@@ -6,8 +6,7 @@ import (
"scroll-tech/common/types/message"
)
// ErrCoordinatorConnect connect to coordinator error
var ErrCoordinatorConnect = errors.New("connect coordinator error")
var ConnectErr = errors.New("connect coordinator error")
// ChallengeResponse defines the response structure for random API
type ChallengeResponse struct {

View File

@@ -162,7 +162,7 @@ func (r *Prover) proveAndSubmit() error {
proofMsg, err = r.prove(task)
if err != nil { // handling error from prove
log.Error("failed to prove task", "task_type", task.Task.Type, "task-id", task.Task.ID, "err", err)
return r.submitErr(task, message.ProofFailureNoPanic, err)
return r.submitErr(task, true, message.ProofFailureNoPanic, err)
}
return r.submitProof(proofMsg)
}
@@ -322,7 +322,7 @@ func (r *Prover) submitProof(msg *message.ProofDetail) error {
// send the submit request
if err := r.coordinatorClient.SubmitProof(r.ctx, req); err != nil {
if !errors.Is(errors.Unwrap(err), client.ErrCoordinatorConnect) {
if !errors.Is(errors.Unwrap(err), client.ConnectErr) {
if deleteErr := r.stack.Delete(msg.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", msg.Type, "task_id", msg.ID, "err", deleteErr)
}
@@ -330,15 +330,12 @@ func (r *Prover) submitProof(msg *message.ProofDetail) error {
return fmt.Errorf("error submitting proof: %v", err)
}
if deleteErr := r.stack.Delete(msg.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", msg.Type, "task_id", msg.ID, "err", deleteErr)
}
log.Info("proof submitted successfully", "task-id", msg.ID, "task-type", msg.Type, "task-status", msg.Status, "err", msg.Error)
return nil
}
func (r *Prover) submitErr(task *store.ProvingTask, proofFailureType message.ProofFailureType, err error) error {
func (r *Prover) submitErr(task *store.ProvingTask, isRetry bool, proofFailureType message.ProofFailureType, err error) error {
// prepare the submit request
req := &client.SubmitProofRequest{
TaskID: task.Task.ID,
@@ -351,16 +348,13 @@ func (r *Prover) submitErr(task *store.ProvingTask, proofFailureType message.Pro
// send the submit request
if submitErr := r.coordinatorClient.SubmitProof(r.ctx, req); submitErr != nil {
if !errors.Is(errors.Unwrap(err), client.ErrCoordinatorConnect) {
if !errors.Is(errors.Unwrap(err), client.ConnectErr) {
if deleteErr := r.stack.Delete(task.Task.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", task.Task.Type, "task_id", task.Task.ID, "err", deleteErr)
}
}
return fmt.Errorf("error submitting proof: %v", submitErr)
}
if deleteErr := r.stack.Delete(task.Task.ID); deleteErr != nil {
log.Error("prover stack pop failed", "task_type", task.Task.Type, "task_id", task.Task.ID, "err", deleteErr)
}
log.Info("proof submitted report failure successfully",
"task-id", task.Task.ID, "task-type", task.Task.Type,