Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 23:48:15 -05:00
Compare commits
19 Commits
- 8c71a6d22a
- 87f18efba8
- fecd129a39
- 663156984f
- 3499c595e7
- 95d2df46e3
- 102d29c54d
- 7d50699344
- e08b800d1d
- 6139ca0df0
- 24a0fd08ac
- 2840485f38
- dab21fc712
- c44b7f7bf4
- a8c71b5e36
- ae2f62df00
- ce5c6e0aa3
- e8ddf99184
- ebf2b429a3
@@ -64,7 +64,7 @@
     "finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
   },
   "chunk_proposer_config": {
-    "max_l2_tx_num_per_chunk": 1123,
+    "max_tx_num_per_chunk": 1123,
     "max_l1_commit_gas_per_chunk": 11234567,
     "max_l1_commit_calldata_size_per_chunk": 112345,
     "chunk_timeout_sec": 300,
@@ -2,6 +2,7 @@ package config
 
 import (
 	"encoding/json"
+	"fmt"
 	"os"
 	"path/filepath"
 
@@ -15,6 +16,13 @@ type Config struct {
 	DBConfig *database.Config `json:"db_config"`
 }
 
+func (c *Config) validate() error {
+	if maxChunkPerBatch := c.L2Config.BatchProposerConfig.MaxChunkNumPerBatch; maxChunkPerBatch <= 0 {
+		return fmt.Errorf("Invalid max_chunk_num_per_batch configuration: %v", maxChunkPerBatch)
+	}
+	return nil
+}
+
 // NewConfig returns a new instance of Config.
 func NewConfig(file string) (*Config, error) {
 	buf, err := os.ReadFile(filepath.Clean(file))
@@ -28,5 +36,8 @@ func NewConfig(file string) (*Config, error) {
 		return nil, err
 	}
 
+	if err := cfg.validate(); err != nil {
+		return nil, err
+	}
 	return cfg, nil
 }
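The new `validate()` hook means a malformed `max_chunk_num_per_batch` now fails at startup rather than at batch-proposal time. A minimal sketch of the fail-fast load pattern this enables (the `main` wiring and config path here are illustrative, not the repo's actual entrypoint):

```go
package main

import (
	"log"

	"scroll-tech/bridge/internal/config" // import path assumed from the hunk above
)

func main() {
	// NewConfig now rejects a non-positive max_chunk_num_per_batch up front,
	// so a bad deployment aborts before any proposer goroutine starts.
	cfg, err := config.NewConfig("./conf/config.json")
	if err != nil {
		log.Fatalf("invalid config: %v", err)
	}
	_ = cfg // hand off to the bridge components
}
```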
@@ -28,7 +28,7 @@ type L2Config struct {
 
 // ChunkProposerConfig loads chunk_proposer configuration items.
 type ChunkProposerConfig struct {
-	MaxL2TxNumPerChunk              uint64 `json:"max_l2_tx_num_per_chunk"`
+	MaxTxNumPerChunk                uint64 `json:"max_tx_num_per_chunk"`
 	MaxL1CommitGasPerChunk          uint64 `json:"max_l1_commit_gas_per_chunk"`
 	MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
 	ChunkTimeoutSec                 uint64 `json:"chunk_timeout_sec"`
@@ -226,7 +226,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
 			log.Info("transaction confirmed in layer2", "confirmation", cfm)
 		}
 	case cfm := <-r.gasOracleSender.ConfirmChan():
-		r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
+		r.metrics.bridgeL1GasOraclerConfirmedTotal.Inc()
 		if !cfm.IsSuccessful {
 			// @discuss: maybe make it pending again?
 			err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
@@ -148,7 +148,7 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
 }
 
 func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
-	dbChunks, err := p.chunkOrm.GetUnbatchedChunks(p.ctx)
+	dbChunks, err := p.chunkOrm.GetUnbatchedChunks(p.ctx, int(p.maxChunkNumPerBatch)+1)
 	if err != nil {
 		return nil, err
 	}
@@ -183,6 +183,10 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
 	}
 
 	for i, chunk := range dbChunks {
+		// metric values
+		lastTotalL1CommitCalldataSize := totalL1CommitCalldataSize
+		lastTotalL1CommitGas := totalL1CommitGas
+
 		totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
 		totalL1CommitGas += chunk.TotalL1CommitGas
 		// adjust batch data hash gas cost
@@ -230,9 +234,9 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
 				"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
 				"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)
 
-			p.totalL1CommitGas.Set(float64(totalL1CommitGas))
-			p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
-			p.batchChunksNum.Set(float64(len(dbChunks)))
+			p.totalL1CommitGas.Set(float64(lastTotalL1CommitGas))
+			p.totalL1CommitCalldataSize.Set(float64(lastTotalL1CommitCalldataSize))
+			p.batchChunksNum.Set(float64(i))
 			return dbChunks[:i], nil
 		}
 	}
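Two patterns are worth noting in this hunk: the proposer now fetches `maxChunkNumPerBatch+1` chunks so the loop can detect the first chunk that overflows a batch limit, and it snapshots the running totals (the `lastTotal…` values) before each addition so the gauges report the state of the batch actually proposed (`dbChunks[:i]`), not the state that tripped the limit. A stripped-down sketch of the same accumulate-and-cut loop (names and the single limit are illustrative):

```go
// proposeUpTo returns the longest prefix of items whose running total stays
// within limit, plus the total actually committed; a toy version of the
// loop above.
func proposeUpTo(items []uint64, limit uint64) ([]uint64, uint64) {
	var total uint64
	for i, v := range items {
		last := total // snapshot before adding, like lastTotalL1CommitGas
		total += v
		if total > limit {
			// report the pre-overflow value, matching the gauge fix above
			return items[:i], last
		}
	}
	return items, total
}
```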
@@ -23,7 +23,7 @@ func testBatchProposer(t *testing.T) {
 	assert.NoError(t, err)
 
 	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-		MaxL2TxNumPerChunk:              10000,
+		MaxTxNumPerChunk:                10000,
 		MaxL1CommitGasPerChunk:          50000000000,
 		MaxL1CommitCalldataSizePerChunk: 1000000,
 		MaxRowConsumptionPerChunk:       1048319,
@@ -40,7 +40,7 @@ func testBatchProposer(t *testing.T) {
 	bp.TryProposeBatch()
 
 	chunkOrm := orm.NewChunk(db)
-	chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
+	chunks, err := chunkOrm.GetUnbatchedChunks(context.Background(), 0)
 	assert.NoError(t, err)
 	assert.Empty(t, chunks)
@@ -18,6 +18,10 @@ import (
 	"scroll-tech/bridge/internal/orm"
 )
 
+// maxNumBlockPerChunk is the maximum number of blocks we allow per chunk.
+// Normally we will pack much fewer blocks because of other limits.
+const maxNumBlockPerChunk int = 100
+
 // chunkRowConsumption is map(sub-circuit name => sub-circuit row count)
 type chunkRowConsumption map[string]uint64
 
@@ -51,7 +55,7 @@ type ChunkProposer struct {
 	chunkOrm   *orm.Chunk
 	l2BlockOrm *orm.L2Block
 
-	maxL2TxNumPerChunk              uint64
+	maxTxNumPerChunk                uint64
 	maxL1CommitGasPerChunk          uint64
 	maxL1CommitCalldataSizePerChunk uint64
 	maxRowConsumptionPerChunk       uint64
@@ -62,7 +66,7 @@ type ChunkProposer struct {
 	proposeChunkFailureTotal           prometheus.Counter
 	proposeChunkUpdateInfoTotal        prometheus.Counter
 	proposeChunkUpdateInfoFailureTotal prometheus.Counter
-	chunkL2TxNum                       prometheus.Gauge
+	chunkTxNum                         prometheus.Gauge
 	chunkEstimateL1CommitGas           prometheus.Gauge
 	totalL1CommitCalldataSize          prometheus.Gauge
 	totalTxGasUsed                     prometheus.Gauge
@@ -75,7 +79,7 @@ type ChunkProposer struct {
 // NewChunkProposer creates a new ChunkProposer instance.
 func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
 	log.Debug("new chunk proposer",
-		"maxL2TxNumPerChunk", cfg.MaxL2TxNumPerChunk,
+		"maxTxNumPerChunk", cfg.MaxTxNumPerChunk,
 		"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
 		"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
 		"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
@@ -86,7 +90,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
 		db:         db,
 		chunkOrm:   orm.NewChunk(db),
 		l2BlockOrm: orm.NewL2Block(db),
-		maxL2TxNumPerChunk:              cfg.MaxL2TxNumPerChunk,
+		maxTxNumPerChunk:                cfg.MaxTxNumPerChunk,
 		maxL1CommitGasPerChunk:          cfg.MaxL1CommitGasPerChunk,
 		maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
 		maxRowConsumptionPerChunk:       cfg.MaxRowConsumptionPerChunk,
@@ -109,9 +113,9 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
 			Name: "bridge_propose_chunk_update_info_failure_total",
 			Help: "Total number of propose chunk update info failure total.",
 		}),
-		chunkL2TxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
-			Name: "bridge_propose_chunk_l2_tx_num",
-			Help: "The chunk l2 tx num",
+		chunkTxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+			Name: "bridge_propose_chunk_tx_num",
+			Help: "The chunk tx num",
 		}),
 		chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
 			Name: "bridge_propose_chunk_estimate_l1_commit_gas",
@@ -182,7 +186,7 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
 }
 
 func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
-	blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx)
+	blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx, maxNumBlockPerChunk)
 	if err != nil {
 		return nil, err
 	}
@@ -193,14 +197,21 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
 
 	var chunk types.Chunk
 	var totalTxGasUsed uint64
-	var totalL2TxNum uint64
+	var totalTxNum uint64
 	var totalL1CommitCalldataSize uint64
 	var totalL1CommitGas uint64
 	crc := chunkRowConsumption{}
 
 	for i, block := range blocks {
+		// metric values
+		lastTotalTxNum := totalTxNum
+		lastTotalL1CommitGas := totalL1CommitGas
+		lastCrcMax := crc.max()
+		lastTotalL1CommitCalldataSize := totalL1CommitCalldataSize
+		lastTotalTxGasUsed := totalTxGasUsed
+
 		totalTxGasUsed += block.Header.GasUsed
-		totalL2TxNum += block.L2TxsNum()
+		totalTxNum += uint64(len(block.Transactions))
 		totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
 		totalL1CommitGas = chunk.EstimateL1CommitGas()
 		totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
@@ -209,19 +220,19 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
 		}
 		crcMax := crc.max()
 
-		if totalL2TxNum > p.maxL2TxNumPerChunk ||
+		if totalTxNum > p.maxTxNumPerChunk ||
 			totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
 			totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
 			crcMax > p.maxRowConsumptionPerChunk {
 			// Check if the first block breaks hard limits.
 			// If so, it indicates there are bugs in sequencer, manual fix is needed.
 			if i == 0 {
-				if totalL2TxNum > p.maxL2TxNumPerChunk {
+				if totalTxNum > p.maxTxNumPerChunk {
 					return nil, fmt.Errorf(
 						"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
 						block.Header.Number,
-						totalL2TxNum,
-						p.maxL2TxNumPerChunk,
+						totalTxNum,
+						p.maxTxNumPerChunk,
 					)
 				}
@@ -255,21 +266,21 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
 			}
 
 			log.Debug("breaking limit condition in chunking",
-				"totalL2TxNum", totalL2TxNum,
-				"maxL2TxNumPerChunk", p.maxL2TxNumPerChunk,
+				"totalTxNum", totalTxNum,
+				"maxTxNumPerChunk", p.maxTxNumPerChunk,
 				"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
-				"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
-				"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
 				"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
+				"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
+				"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
 				"chunkRowConsumptionMax", crcMax,
 				"chunkRowConsumption", crc,
 				"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
 
-			p.chunkL2TxNum.Set(float64(totalL2TxNum))
-			p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
-			p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
-			p.maxTxConsumption.Set(float64(crcMax))
-			p.totalTxGasUsed.Set(float64(totalTxGasUsed))
+			p.chunkTxNum.Set(float64(lastTotalTxNum))
+			p.chunkEstimateL1CommitGas.Set(float64(lastTotalL1CommitGas))
+			p.totalL1CommitCalldataSize.Set(float64(lastTotalL1CommitCalldataSize))
+			p.maxTxConsumption.Set(float64(lastCrcMax))
+			p.totalTxGasUsed.Set(float64(lastTotalTxGasUsed))
 			p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
 			return &chunk, nil
 		}
@@ -284,7 +295,7 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
 			"block outdated time threshold", currentTimeSec,
 		)
 		p.chunkFirstBlockTimeoutReached.Inc()
-		p.chunkL2TxNum.Set(float64(totalL2TxNum))
+		p.chunkTxNum.Set(float64(totalTxNum))
 		p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
 		p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
 		p.maxTxConsumption.Set(float64(crc.max()))
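For completeness, this last branch is the counterpart to the limit branch above: if the oldest unchunked block has waited longer than `chunk_timeout_sec`, the proposer ships whatever has accumulated even though no limit was hit, and the gauges get the full running totals rather than the `last…` snapshots. A compact restatement of the trigger condition (a sketch; field names are assumptions following the config shown earlier):

```go
// Sketch of the age trigger, assuming the geth-style Header.Time field and a
// chunkTimeoutSec field loaded from chunk_timeout_sec in the config above.
currentTimeSec := uint64(time.Now().Unix())
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
	// oldest unchunked block has waited too long: ship the partial chunk
	return &chunk, nil
}
```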
@@ -23,7 +23,7 @@ func testChunkProposer(t *testing.T) {
 	assert.NoError(t, err)
 
 	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-		MaxL2TxNumPerChunk:              10000,
+		MaxTxNumPerChunk:                10000,
 		MaxL1CommitGasPerChunk:          50000000000,
 		MaxL1CommitCalldataSizePerChunk: 1000000,
 		MaxRowConsumptionPerChunk:       1048319,
@@ -38,7 +38,7 @@ func testChunkProposer(t *testing.T) {
 	assert.NoError(t, err)
 
 	chunkOrm := orm.NewChunk(db)
-	chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
+	chunks, err := chunkOrm.GetUnbatchedChunks(context.Background(), 0)
 	assert.NoError(t, err)
 	assert.Len(t, chunks, 1)
 	assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
@@ -53,7 +53,7 @@ func testChunkProposerRowConsumption(t *testing.T) {
 	assert.NoError(t, err)
 
 	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-		MaxL2TxNumPerChunk:              10000,
+		MaxTxNumPerChunk:                10000,
 		MaxL1CommitGasPerChunk:          50000000000,
 		MaxL1CommitCalldataSizePerChunk: 1000000,
 		MaxRowConsumptionPerChunk:       0, // !
@@ -62,7 +62,7 @@ func testChunkProposerRowConsumption(t *testing.T) {
 	cp.TryProposeChunk()
 
 	chunkOrm := orm.NewChunk(db)
-	chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
+	chunks, err := chunkOrm.GetUnbatchedChunks(context.Background(), 0)
 	assert.NoError(t, err)
 	assert.Len(t, chunks, 0)
 }
@@ -88,12 +88,16 @@ func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endInde
 }
 
 // GetUnbatchedChunks retrieves unbatched chunks from the database.
-func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) {
+func (o *Chunk) GetUnbatchedChunks(ctx context.Context, limit int) ([]*Chunk, error) {
 	db := o.db.WithContext(ctx)
 	db = db.Model(&Chunk{})
 	db = db.Where("batch_hash IS NULL")
 	db = db.Order("index asc")
 
+	if limit > 0 {
+		db = db.Limit(limit)
+	}
+
 	var chunks []*Chunk
 	if err := db.Find(&chunks).Error; err != nil {
 		return nil, fmt.Errorf("Chunk.GetUnbatchedChunks error: %w", err)
@@ -151,7 +155,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
 	var totalL1CommitCalldataSize uint64
 	for _, block := range chunk.Blocks {
 		totalL2TxGas += block.Header.GasUsed
-		totalL2TxNum += block.L2TxsNum()
+		totalL2TxNum += block.NumL2Transactions()
 		totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
 	}
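The `limit` parameter added to `GetUnbatchedChunks` (and to `GetUnchunkedBlocks` below) treats any non-positive value as "no limit", which is why the updated tests pass `0`. A minimal GORM sketch of the same conditional-limit idiom, written against a `Chunk` model like the one in this package:

```go
package orm

import "gorm.io/gorm"

// unbatchedChunks mirrors the query above; a non-positive limit means
// "return every unbatched chunk", which is why the tests pass 0.
func unbatchedChunks(db *gorm.DB, limit int) ([]*Chunk, error) {
	q := db.Model(&Chunk{}).
		Where("batch_hash IS NULL").
		Order("index asc")
	if limit > 0 {
		q = q.Limit(limit)
	}
	var chunks []*Chunk
	if err := q.Find(&chunks).Error; err != nil {
		return nil, err
	}
	return chunks, nil
}
```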
@@ -66,13 +66,17 @@ func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
 
 // GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk.
 // The returned blocks are sorted in ascending order by their block number.
-func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
+func (o *L2Block) GetUnchunkedBlocks(ctx context.Context, limit int) ([]*types.WrappedBlock, error) {
 	db := o.db.WithContext(ctx)
 	db = db.Model(&L2Block{})
 	db = db.Select("header, transactions, withdraw_root, row_consumption")
 	db = db.Where("chunk_hash IS NULL")
 	db = db.Order("number ASC")
 
+	if limit > 0 {
+		db = db.Limit(limit)
+	}
+
 	var l2Blocks []L2Block
 	if err := db.Find(&l2Blocks).Error; err != nil {
 		return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
@@ -101,7 +101,7 @@ func TestL2BlockOrm(t *testing.T) {
 	assert.NoError(t, err)
 	assert.Equal(t, uint64(3), height)
 
-	blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background())
+	blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background(), 0)
 	assert.NoError(t, err)
 	assert.Len(t, blocks, 2)
 	assert.Equal(t, wrappedBlock1, blocks[0])
@@ -116,7 +116,7 @@ func TestL2BlockOrm(t *testing.T) {
 	err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 2, 2, "test hash")
 	assert.NoError(t, err)
 
-	blocks, err = l2BlockOrm.GetUnchunkedBlocks(context.Background())
+	blocks, err = l2BlockOrm.GetUnchunkedBlocks(context.Background(), 0)
 	assert.NoError(t, err)
 	assert.Len(t, blocks, 1)
 	assert.Equal(t, wrappedBlock2, blocks[0])
@@ -135,7 +135,7 @@ func TestChunkOrm(t *testing.T) {
 	assert.NoError(t, err)
 	assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
 
-	chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
+	chunks, err := chunkOrm.GetUnbatchedChunks(context.Background(), 0)
 	assert.NoError(t, err)
 	assert.Len(t, chunks, 2)
 	assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
@@ -156,7 +156,7 @@ func TestChunkOrm(t *testing.T) {
 
 	err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
 	assert.NoError(t, err)
-	chunks, err = chunkOrm.GetUnbatchedChunks(context.Background())
+	chunks, err = chunkOrm.GetUnbatchedChunks(context.Background(), 0)
 	assert.NoError(t, err)
 	assert.Len(t, chunks, 1)
 }
@@ -58,7 +58,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
 	assert.NoError(t, err)
 
 	cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-		MaxL2TxNumPerChunk:              10000,
+		MaxTxNumPerChunk:                10000,
 		MaxL1CommitGasPerChunk:          50000000000,
 		MaxL1CommitCalldataSizePerChunk: 1000000,
 		MaxRowConsumptionPerChunk:       1048319,
@@ -67,7 +67,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
 	cp.TryProposeChunk()
 
 	chunkOrm := orm.NewChunk(db)
-	chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
+	chunks, err := chunkOrm.GetUnbatchedChunks(context.Background(), 0)
 	assert.NoError(t, err)
 	assert.Len(t, chunks, 1)
common/libzkp/impl/Cargo.lock (generated, 28 changed lines)
@@ -32,7 +32,7 @@ dependencies = [
 [[package]]
 name = "aggregator"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "ark-std",
  "env_logger 0.10.0",
@@ -433,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
 [[package]]
 name = "bus-mapping"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "eth-types",
  "ethers-core",
@@ -1049,7 +1049,7 @@ dependencies = [
 [[package]]
 name = "eth-types"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "ethers-core",
  "ethers-signers",
@@ -1226,7 +1226,7 @@ dependencies = [
 [[package]]
 name = "external-tracer"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "eth-types",
  "geth-utils",
@@ -1439,7 +1439,7 @@ dependencies = [
 [[package]]
 name = "gadgets"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "digest 0.7.6",
  "eth-types",
@@ -1479,7 +1479,7 @@ dependencies = [
 [[package]]
 name = "geth-utils"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "env_logger 0.9.3",
  "gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -2077,7 +2077,7 @@ dependencies = [
 [[package]]
 name = "keccak256"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "env_logger 0.9.3",
  "eth-types",
@@ -2264,7 +2264,7 @@ dependencies = [
 [[package]]
 name = "mock"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "eth-types",
  "ethers-core",
@@ -2279,7 +2279,7 @@ dependencies = [
 [[package]]
 name = "mpt-zktrie"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "bus-mapping",
  "eth-types",
@@ -2754,8 +2754,8 @@ dependencies = [
 
 [[package]]
 name = "prover"
-version = "0.7.2"
-source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.2#a29cbaae9cb52b0eb61a4418caf6fbb6eb5d28f4"
+version = "0.7.5"
+source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.5#9699d40940aed2f14d8e1958167d714bca2c9984"
 dependencies = [
  "aggregator",
  "anyhow",
@@ -4039,8 +4039,8 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
 
 [[package]]
 name = "types"
-version = "0.7.2"
-source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.2#a29cbaae9cb52b0eb61a4418caf6fbb6eb5d28f4"
+version = "0.7.5"
+source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.5#9699d40940aed2f14d8e1958167d714bca2c9984"
 dependencies = [
  "base64 0.13.1",
  "blake2",
@@ -4491,7 +4491,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
 [[package]]
 name = "zkevm-circuits"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.2#194216272f813944a7013f14d73d1de19375d2ce"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
 dependencies = [
  "array-init",
  "bus-mapping",
@@ -20,8 +20,8 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
 halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
 
 [dependencies]
-prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.2" }
-types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.2" }
+prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
+types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
 halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
 
 base64 = "0.13.0"
@@ -130,17 +130,6 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
 	return total
 }
 
-// L2TxsNum calculates the number of l2 txs.
-func (w *WrappedBlock) L2TxsNum() uint64 {
-	var count uint64
-	for _, txData := range w.Transactions {
-		if txData.Type != types.L1MessageTxType {
-			count++
-		}
-	}
-	return count
-}
-
 func (w *WrappedBlock) getTxPayloadLength(txData *types.TransactionData) uint64 {
 	if w.txPayloadLengthCache == nil {
 		w.txPayloadLengthCache = make(map[string]uint64)
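The deleted `L2TxsNum` counted the transactions whose type is not `L1MessageTxType`; call sites now use `NumL2Transactions`, and the updated tests assert it returns the same values. A hedged restatement of that counting logic, written against the same `WrappedBlock` fields the deleted code used (whether `NumL2Transactions` is implemented exactly this way is an assumption):

```go
// Sketch: count non-L1-message transactions, the quantity L2TxsNum computed.
// Uses the same Transactions slice and types.L1MessageTxType constant as the
// removed function above.
func numL2Transactions(w *WrappedBlock) uint64 {
	var count uint64
	for _, txData := range w.Transactions {
		if txData.Type != types.L1MessageTxType {
			count++
		}
	}
	return count
}
```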
@@ -39,7 +39,7 @@ func TestChunkEncode(t *testing.T) {
 	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
 	assert.Equal(t, uint64(0), wrappedBlock.NumL1Messages(0))
 	assert.Equal(t, uint64(358), wrappedBlock.EstimateL1CommitCalldataSize())
-	assert.Equal(t, uint64(2), wrappedBlock.L2TxsNum())
+	assert.Equal(t, uint64(2), wrappedBlock.NumL2Transactions())
 	chunk = &Chunk{
 		Blocks: []*WrappedBlock{
 			wrappedBlock,
@@ -61,7 +61,7 @@ func TestChunkEncode(t *testing.T) {
 	assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
 	assert.Equal(t, uint64(11), wrappedBlock2.NumL1Messages(0)) // 0..=9 skipped, 10 included
 	assert.Equal(t, uint64(96), wrappedBlock2.EstimateL1CommitCalldataSize())
-	assert.Equal(t, uint64(1), wrappedBlock2.L2TxsNum())
+	assert.Equal(t, uint64(1), wrappedBlock2.NumL2Transactions())
 	chunk = &Chunk{
 		Blocks: []*WrappedBlock{
 			wrappedBlock2,
@@ -7,7 +7,7 @@ import (
 	"strings"
 )
 
-var tag = "v4.1.97"
+var tag = "v4.1.116"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -78,7 +78,7 @@ func CheckScrollProverVersionTag(proverVersion string) bool {
 	if remoteTagMinor != 1 {
 		return false
 	}
-	if remoteTagPatch < 96 {
+	if remoteTagPatch < 98 {
 		return false
 	}
 	return true
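The tag bump to `v4.1.116` together with the raised `remoteTagPatch < 98` floor means the coordinator now rejects provers older than v4.1.98 (the coordinator test further down switches its fake version to `v4.1.98-aaa-bbb-ccc` accordingly). A rough sketch of this kind of minimum-patch gate, with a hypothetical parse helper rather than the file's real parsing code:

```go
package version

import (
	"strconv"
	"strings"
)

// patchOf extracts the patch number from a tag like "v4.1.98-aaa-bbb-ccc".
// Hypothetical helper; the real file does its own parsing.
func patchOf(tag string) (int, bool) {
	parts := strings.SplitN(strings.TrimPrefix(tag, "v"), ".", 3)
	if len(parts) != 3 {
		return 0, false
	}
	patch := strings.SplitN(parts[2], "-", 2)[0]
	n, err := strconv.Atoi(patch)
	if err != nil {
		return 0, false
	}
	return n, true
}

// acceptProver mirrors the check above: major 4, minor 1, patch >= 98.
func acceptProver(tag string) bool {
	n, ok := patchOf(tag)
	return ok && strings.HasPrefix(tag, "v4.1.") && n >= 98
}
```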
@@ -12,6 +12,7 @@ import {L1ScrollMessenger} from "../../src/L1/L1ScrollMessenger.sol";
 import {L1StandardERC20Gateway} from "../../src/L1/gateways/L1StandardERC20Gateway.sol";
 import {L1WETHGateway} from "../../src/L1/gateways/L1WETHGateway.sol";
 import {L1DAIGateway} from "../../src/L1/gateways/L1DAIGateway.sol";
+import {MultipleVersionRollupVerifier} from "../../src/L1/rollup/MultipleVersionRollupVerifier.sol";
 import {ScrollChain} from "../../src/L1/rollup/ScrollChain.sol";
 import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
 import {L2GasPriceOracle} from "../../src/L1/rollup/L2GasPriceOracle.sol";
@@ -71,6 +72,9 @@ contract InitializeL1BridgeContracts is Script {
         ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addSequencer(L1_COMMIT_SENDER_ADDRESS);
         ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addProver(L1_FINALIZE_SENDER_ADDRESS);
 
+        // initialize MultipleVersionRollupVerifier
+        MultipleVersionRollupVerifier(L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR).initialize(L1_SCROLL_CHAIN_PROXY_ADDR);
+
         // initialize L2GasPriceOracle
         L2GasPriceOracle(L2_GAS_PRICE_ORACLE_PROXY_ADDR).initialize(
             21000, // _txGas
@@ -98,6 +98,9 @@ contract L1ScrollMessenger is ScrollMessengerBase, IL1ScrollMessenger {
 
         rollup = _rollup;
         messageQueue = _messageQueue;
+
+        maxReplayTimes = 3;
+        emit UpdateMaxReplayTimes(0, 3);
     }
 
     /*****************************
@@ -14,8 +14,6 @@ import {ScrollConstants} from "../../libraries/constants/ScrollConstants.sol";
 import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
 import {IMessageDropCallback} from "../../libraries/callbacks/IMessageDropCallback.sol";
 
-// solhint-disable no-empty-blocks
-
 abstract contract L1ERC20Gateway is IL1ERC20Gateway, IMessageDropCallback, ScrollGatewayBase {
     using SafeERC20Upgradeable for IERC20Upgradeable;
@@ -52,7 +52,7 @@ contract L1ETHGateway is ScrollGatewayBase, IL1ETHGateway, IMessageDropCallback
         address _to,
         uint256 _amount,
         uint256 _gasLimit
-    ) public payable override {
+    ) external payable override {
         _deposit(_to, _amount, new bytes(0), _gasLimit);
     }
@@ -28,6 +28,9 @@ interface IScrollChain {
      * Public View Functions *
      *************************/
 
+    /// @notice The latest finalized batch index.
+    function lastFinalizedBatchIndex() external view returns (uint256);
+
     /// @notice Return the batch hash of a committed batch.
     /// @param batchIndex The index of the batch.
     function committedBatches(uint256 batchIndex) external view returns (bytes32);
@@ -4,6 +4,7 @@ pragma solidity =0.8.16;
 
 import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
 
+import {IScrollChain} from "./IScrollChain.sol";
 import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";
 import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol";
 
@@ -38,6 +39,9 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     /// @notice The lastest used zkevm verifier.
     Verifier public latestVerifier;
 
+    /// @notice The address of ScrollChain contract.
+    address public scrollChain;
+
     /***************
      * Constructor *
      ***************/
@@ -48,6 +52,12 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
         latestVerifier.verifier = _verifier;
     }
 
+    function initialize(address _scrollChain) external onlyOwner {
+        require(scrollChain == address(0), "initialized");
+
+        scrollChain = _scrollChain;
+    }
+
     /*************************
      * Public View Functions *
      *************************/
@@ -101,6 +111,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable {
     /// @param _startBatchIndex The start batch index when the verifier will be used.
     /// @param _verifier The address of new verifier.
     function updateVerifier(uint64 _startBatchIndex, address _verifier) external onlyOwner {
+        require(_startBatchIndex > IScrollChain(scrollChain).lastFinalizedBatchIndex(), "start batch index finalized");
+
         Verifier memory _latestVerifier = latestVerifier;
         require(_startBatchIndex >= _latestVerifier.startBatchIndex, "start batch index too small");
         require(_verifier != address(0), "zero verifier address");
@@ -67,8 +67,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     /// @notice Whether an account is a prover.
     mapping(address => bool) public isProver;
 
-    /// @notice The latest finalized batch index.
-    uint256 public lastFinalizedBatchIndex;
+    /// @inheritdoc IScrollChain
+    uint256 public override lastFinalizedBatchIndex;
 
     /// @inheritdoc IScrollChain
     mapping(uint256 => bytes32) public override committedBatches;
@@ -272,9 +272,10 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
         require(_batchIndex > lastFinalizedBatchIndex, "can only revert unfinalized batch");
 
         while (_count > 0) {
-            committedBatches[_batchIndex] = bytes32(0);
-
             emit RevertBatch(_batchIndex, _batchHash);
 
+            committedBatches[_batchIndex] = bytes32(0);
+
             unchecked {
                 _batchIndex += 1;
                 _count -= 1;
@@ -40,12 +40,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
     /// @notice Mapping from L1 message hash to a boolean value indicating if the message has been successfully executed.
     mapping(bytes32 => bool) public isL1MessageExecuted;
 
-    /// @notice Mapping from L1 message hash to the number of failure times.
-    mapping(bytes32 => uint256) public l1MessageFailedTimes;
-
-    /// @notice The maximum number of times each L1 message can fail on L2.
-    uint256 public maxFailedExecutionTimes;
-
     /***************
      * Constructor *
      ***************/
@@ -58,8 +52,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
 
     function initialize(address _counterpart) external initializer {
         ScrollMessengerBase.__ScrollMessengerBase_init(_counterpart, address(0));
-
-        maxFailedExecutionTimes = 3;
     }
 
     /*****************************
@@ -105,22 +97,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
         _executeMessage(_from, _to, _value, _message, _xDomainCalldataHash);
     }
 
-    /************************
-     * Restricted Functions *
-     ************************/
-
-    /// @notice Update max failed execution times.
-    /// @dev This function can only called by contract owner.
-    /// @param _newMaxFailedExecutionTimes The new max failed execution times.
-    function updateMaxFailedExecutionTimes(uint256 _newMaxFailedExecutionTimes) external onlyOwner {
-        require(_newMaxFailedExecutionTimes > 0, "maxFailedExecutionTimes cannot be zero");
-
-        uint256 _oldMaxFailedExecutionTimes = maxFailedExecutionTimes;
-        maxFailedExecutionTimes = _newMaxFailedExecutionTimes;
-
-        emit UpdateMaxFailedExecutionTimes(_oldMaxFailedExecutionTimes, _newMaxFailedExecutionTimes);
-    }
-
     /**********************
      * Internal Functions *
      **********************/
@@ -181,11 +157,6 @@ contract L2ScrollMessenger is ScrollMessengerBase, IL2ScrollMessenger {
         isL1MessageExecuted[_xDomainCalldataHash] = true;
         emit RelayedMessage(_xDomainCalldataHash);
     } else {
-        unchecked {
-            uint256 _failedTimes = l1MessageFailedTimes[_xDomainCalldataHash] + 1;
-            require(_failedTimes <= maxFailedExecutionTimes, "Exceed maximum failure times");
-            l1MessageFailedTimes[_xDomainCalldataHash] = _failedTimes;
-        }
         emit FailedRelayedMessage(_xDomainCalldataHash);
     }
 }
@@ -6,8 +6,6 @@ import {IL2ERC20Gateway} from "./IL2ERC20Gateway.sol";
 
 import {ScrollGatewayBase} from "../../libraries/gateway/ScrollGatewayBase.sol";
 
-// solhint-disable no-empty-blocks
-
 abstract contract L2ERC20Gateway is ScrollGatewayBase, IL2ERC20Gateway {
     /*************
      * Variables *
@@ -6,7 +6,6 @@ import {ERC2771Context} from "@openzeppelin/contracts/metatx/ERC2771Context.sol"
 import {ReentrancyGuard} from "@openzeppelin/contracts/security/ReentrancyGuard.sol";
-import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
 import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";
+import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
 import {IERC20Permit} from "@openzeppelin/contracts/token/ERC20/extensions/draft-IERC20Permit.sol";
 
 import {OwnableBase} from "../libraries/common/OwnableBase.sol";
@@ -15,6 +14,7 @@ import {OwnableBase} from "../libraries/common/OwnableBase.sol";
 
 contract GasSwap is ERC2771Context, ReentrancyGuard, OwnableBase {
     using SafeERC20 for IERC20;
+    using SafeERC20 for IERC20Permit;
 
     /**********
      * Events *
@@ -94,7 +94,7 @@ contract GasSwap is ERC2771Context, ReentrancyGuard, OwnableBase {
         address _sender = _msgSender();
 
         // do permit
-        IERC20Permit(_permit.token).permit(
+        IERC20Permit(_permit.token).safePermit(
            _sender,
            address(this),
            _permit.value,
@@ -106,6 +106,8 @@ contract L1ScrollMessengerTest is L1GatewayTestBase {
 
         exceedValue = bound(exceedValue, 1, address(this).balance / 2);
 
+        l1Messenger.updateMaxReplayTimes(0);
+
         // append a message
         l1Messenger.sendMessage{value: 100}(address(0), 100, new bytes(0), 0, refundAddress);
 
@@ -179,9 +181,9 @@ contract L1ScrollMessengerTest is L1GatewayTestBase {
         hevm.stopPrank();
 
         hevm.expectEmit(false, false, false, true);
-        emit UpdateMaxReplayTimes(0, _maxReplayTimes);
+        emit UpdateMaxReplayTimes(3, _maxReplayTimes);
 
-        assertEq(l1Messenger.maxReplayTimes(), 0);
+        assertEq(l1Messenger.maxReplayTimes(), 3);
         l1Messenger.updateMaxReplayTimes(_maxReplayTimes);
         assertEq(l1Messenger.maxReplayTimes(), _maxReplayTimes);
     }
@@ -7,6 +7,7 @@ import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
 import {L1MessageQueue} from "../L1/rollup/L1MessageQueue.sol";
 import {MultipleVersionRollupVerifier} from "../L1/rollup/MultipleVersionRollupVerifier.sol";
 
+import {MockScrollChain} from "./mocks/MockScrollChain.sol";
 import {MockZkEvmVerifier} from "./mocks/MockZkEvmVerifier.sol";
 
 contract MultipleVersionRollupVerifierTest is DSTestPlus {
@@ -17,27 +18,54 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
     MockZkEvmVerifier private v0;
     MockZkEvmVerifier private v1;
     MockZkEvmVerifier private v2;
+    MockScrollChain private chain;
 
     function setUp() external {
         v0 = new MockZkEvmVerifier();
         v1 = new MockZkEvmVerifier();
         v2 = new MockZkEvmVerifier();
+        chain = new MockScrollChain();
 
         verifier = new MultipleVersionRollupVerifier(address(v0));
     }
 
+    function testInitialize(address _chain) external {
+        hevm.assume(_chain != address(0));
+
+        // set by non-owner, should revert
+        hevm.startPrank(address(1));
+        hevm.expectRevert("Ownable: caller is not the owner");
+        verifier.initialize(_chain);
+        hevm.stopPrank();
+
+        // succeed
+        assertEq(verifier.scrollChain(), address(0));
+        verifier.initialize(_chain);
+        assertEq(verifier.scrollChain(), _chain);
+
+        // initialized, revert
+        hevm.expectRevert("initialized");
+        verifier.initialize(_chain);
+    }
+
     function testUpdateVerifier(address _newVerifier) external {
         hevm.assume(_newVerifier != address(0));
 
+        verifier.initialize(address(chain));
+
         // set by non-owner, should revert
         hevm.startPrank(address(1));
        hevm.expectRevert("Ownable: caller is not the owner");
         verifier.updateVerifier(0, address(0));
         hevm.stopPrank();
 
+        // start batch index finalized, revert
+        hevm.expectRevert("start batch index finalized");
+        verifier.updateVerifier(0, address(1));
+
         // zero verifier address, revert
         hevm.expectRevert("zero verifier address");
-        verifier.updateVerifier(0, address(0));
+        verifier.updateVerifier(1, address(0));
 
         // change to random operator
         assertEq(verifier.legacyVerifiersLength(), 0);
@@ -65,6 +93,8 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
     }
 
     function testGetVerifier() external {
+        verifier.initialize(address(chain));
+
         verifier.updateVerifier(100, address(v1));
         verifier.updateVerifier(300, address(v2));
 
@@ -80,6 +110,8 @@ contract MultipleVersionRollupVerifierTest is DSTestPlus {
     }
 
     function testVerifyAggregateProof() external {
+        verifier.initialize(address(chain));
+
         verifier.updateVerifier(100, address(v1));
         verifier.updateVerifier(300, address(v2));
@@ -7,18 +7,7 @@ import {ScrollChain} from "../../L1/rollup/ScrollChain.sol";
 contract MockScrollChain is ScrollChain {
     constructor() ScrollChain(0) {}
 
-    /*
-    function computePublicInputHash(uint64 accTotalL1Messages, Batch memory batch)
-        external
-        view
-        returns (
-            bytes32,
-            uint64,
-            uint64,
-            uint64
-        )
-    {
-        return _computePublicInputHash(accTotalL1Messages, batch);
+    function setLastFinalizedBatchIndex(uint256 _lastFinalizedBatchIndex) external {
+        lastFinalizedBatchIndex = _lastFinalizedBatchIndex;
     }
-    */
 }
@@ -41,7 +41,7 @@ func NewGetTaskController(cfg *config.Config, db *gorm.DB, vf *verifier.Verifier
 func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
 	var getTaskParameter coordinatorType.GetTaskParameter
 	if err := ctx.ShouldBind(&getTaskParameter); err != nil {
-		nerr := fmt.Errorf("prover tasks parameter invalid, err:%w", err)
+		nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
 		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
 		return
 	}
@@ -49,7 +49,7 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
 	proofType := ptc.proofType(&getTaskParameter)
 	proverTask, isExist := ptc.proverTasks[proofType]
 	if !isExist {
-		nerr := fmt.Errorf("parameter wrong proof type")
+		nerr := fmt.Errorf("parameter wrong proof type:%v", proofType)
 		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
 		return
 	}
@@ -14,7 +14,7 @@ import (
 	"scroll-tech/coordinator/internal/config"
 	"scroll-tech/coordinator/internal/logic/submitproof"
 	"scroll-tech/coordinator/internal/logic/verifier"
-	coodinatorType "scroll-tech/coordinator/internal/types"
+	coordinatorType "scroll-tech/coordinator/internal/types"
 )
 
 // SubmitProofController the submit proof api controller
@@ -31,10 +31,10 @@ func NewSubmitProofController(cfg *config.Config, db *gorm.DB, vf *verifier.Veri
 
 // SubmitProof prover submit the proof to coordinator
 func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
-	var spp coodinatorType.SubmitProofParameter
+	var spp coordinatorType.SubmitProofParameter
 	if err := ctx.ShouldBind(&spp); err != nil {
 		nerr := fmt.Errorf("parameter invalid, err:%w", err)
-		coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
+		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
 		return
 	}
@@ -52,7 +52,7 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
 		var tmpChunkProof message.ChunkProof
 		if err := json.Unmarshal([]byte(spp.Proof), &tmpChunkProof); err != nil {
 			nerr := fmt.Errorf("unmarshal parameter chunk proof invalid, err:%w", err)
-			coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
+			coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
 			return
 		}
 		proofMsg.ChunkProof = &tmpChunkProof
@@ -60,7 +60,7 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
 		var tmpBatchProof message.BatchProof
 		if err := json.Unmarshal([]byte(spp.Proof), &tmpBatchProof); err != nil {
 			nerr := fmt.Errorf("unmarshal parameter batch proof invalid, err:%w", err)
-			coodinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
+			coordinatorType.RenderJSON(ctx, types.ErrCoordinatorParameterInvalidNo, nerr, nil)
 			return
 		}
 		proofMsg.BatchProof = &tmpBatchProof
@@ -69,8 +69,8 @@ func (spc *SubmitProofController) SubmitProof(ctx *gin.Context) {
 
 	if err := spc.submitProofReceiverLogic.HandleZkProof(ctx, &proofMsg, spp); err != nil {
 		nerr := fmt.Errorf("handle zk proof failure, err:%w", err)
-		coodinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
+		coordinatorType.RenderJSON(ctx, types.ErrCoordinatorHandleZkProofFailure, nerr, nil)
 		return
 	}
-	coodinatorType.RenderJSON(ctx, types.Success, nil, nil)
+	coordinatorType.RenderJSON(ctx, types.Success, nil, nil)
 }
@@ -29,10 +29,11 @@ type Collector struct {
 	chunkOrm *orm.Chunk
 	batchOrm *orm.Batch
 
-	timeoutBatchCheckerRunTotal prometheus.Counter
-	batchProverTaskTimeoutTotal prometheus.Counter
-	timeoutChunkCheckerRunTotal prometheus.Counter
-	chunkProverTaskTimeoutTotal prometheus.Counter
+	timeoutBatchCheckerRunTotal     prometheus.Counter
+	batchProverTaskTimeoutTotal     prometheus.Counter
+	timeoutChunkCheckerRunTotal     prometheus.Counter
+	chunkProverTaskTimeoutTotal     prometheus.Counter
+	checkBatchAllChunkReadyRunTotal prometheus.Counter
 }
 
 // NewCollector create a collector to cron collect the data to send to prover
@@ -62,10 +63,15 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
 			Name: "coordinator_chunk_prover_task_timeout_total",
 			Help: "Total number of chunk timeout prover task.",
 		}),
+		checkBatchAllChunkReadyRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "coordinator_check_batch_all_chunk_ready_run_total",
+			Help: "Total number of check batch all chunks ready total",
+		}),
 	}
 
 	go c.timeoutBatchProofTask()
 	go c.timeoutChunkProofTask()
+	go c.checkBatchAllChunkReady()
 
 	log.Info("Start coordinator successfully.")
@@ -79,7 +85,6 @@ func (c *Collector) Stop() {
 
 // timeoutTask cron check the send task is timeout. if timeout reached, restore the
 // chunk/batch task to unassigned. then the batch/chunk collector can retry it.
-
 func (c *Collector) timeoutBatchProofTask() {
 	defer func() {
 		if err := recover(); err != nil {
@@ -189,3 +194,60 @@ func (c *Collector) check(assignedProverTasks []orm.ProverTask, timeout promethe
 		}
 	}
 }
+
+func (c *Collector) checkBatchAllChunkReady() {
+	defer func() {
+		if err := recover(); err != nil {
+			nerr := fmt.Errorf("check batch all chunk ready panic error:%v", err)
+			log.Warn(nerr.Error())
+		}
+	}()
+
+	ticker := time.NewTicker(time.Second * 10)
+	for {
+		select {
+		case <-ticker.C:
+			c.checkBatchAllChunkReadyRunTotal.Inc()
+			page := 1
+			pageSize := 50
+			for {
+				offset := (page - 1) * pageSize
+				batches, err := c.batchOrm.GetUnassignedAndChunksUnreadyBatches(c.ctx, offset, pageSize)
+				if err != nil {
+					log.Warn("checkBatchAllChunkReady GetUnassignedAndChunksUnreadyBatches", "error", err)
+					break
+				}
+
+				for _, batch := range batches {
+					allReady, checkErr := c.chunkOrm.CheckIfBatchChunkProofsAreReady(c.ctx, batch.Hash)
+					if checkErr != nil {
+						log.Warn("checkBatchAllChunkReady CheckIfBatchChunkProofsAreReady failure", "error", checkErr, "hash", batch.Hash)
+						continue
+					}
+
+					if !allReady {
+						continue
+					}
+
+					if updateErr := c.batchOrm.UpdateChunkProofsStatusByBatchHash(c.ctx, batch.Hash, types.ChunkProofsStatusReady); updateErr != nil {
+						log.Warn("checkBatchAllChunkReady UpdateChunkProofsStatusByBatchHash failure", "error", updateErr, "hash", batch.Hash)
+					}
+				}
+
+				if len(batches) < pageSize {
+					break
+				}
+				page++
+			}
+
+		case <-c.ctx.Done():
+			if c.ctx.Err() != nil {
+				log.Error("manager context canceled with error", "error", c.ctx.Err())
+			}
+			return
+		case <-c.stopTimeoutChan:
+			log.Info("the coordinator run loop exit")
+			return
+		}
+	}
+}
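One subtlety in the pagination above: the loop both filters on `ChunkProofsStatusPending` and flips qualifying rows to `Ready` inside the same sweep, so advancing `page` walks an offset over a shrinking result set and can skip rows until the next ticker run picks them up. A keyset-style cursor sidesteps that; a sketch using a hypothetical query helper:

```go
// sweepPendingBatches sketches cursor pagination keyed on the batch index
// instead of OFFSET, so rows flipped to Ready cannot shift later pages.
// GetPendingBatchesAfterIndex is hypothetical, and Batch.Index is assumed
// to be the column the real query already orders by.
func (c *Collector) sweepPendingBatches(pageSize int) {
	lastIndex := uint64(0)
	for {
		batches, err := c.batchOrm.GetPendingBatchesAfterIndex(c.ctx, lastIndex, pageSize)
		if err != nil {
			log.Warn("sweepPendingBatches query failure", "error", err)
			return
		}
		for _, batch := range batches {
			lastIndex = batch.Index
			// ...same readiness check and status update as checkBatchAllChunkReady...
		}
		if len(batches) < pageSize {
			return
		}
	}
}
```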
@@ -101,7 +101,8 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}
 
 	if len(batchTasks) != 1 {
-		return nil, fmt.Errorf("get unassigned batch proving task len not 1, batch tasks:%v", batchTasks)
+		log.Error("get unassigned batch proving task len not 1", "length", len(batchTasks), "batch tasks", batchTasks)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	batchTask := batchTasks[0]
@@ -109,7 +110,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 
 	if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
 		bp.batchAttemptsExceedTotal.Inc()
-		return nil, fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
+		// TODO: retry fetching unassigned batch proving task
+		log.Error("batch task proving attempts reach the maximum", "hash", batchTask.Hash)
+		return nil, nil
 	}
 
 	proverTask := orm.ProverTask{
@@ -127,13 +130,15 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	// Store session info.
 	if err = bp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
 		bp.recoverProvingStatus(ctx, batchTask)
-		return nil, fmt.Errorf("db set session info fail, session id:%s, error:%w", proverTask.TaskID, err)
+		log.Error("db set session info fail", "task hash", batchTask.Hash, "prover name", proverName.(string), "prover pubKey", publicKey.(string), "err", err)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	taskMsg, err := bp.formatProverTask(ctx, batchTask.Hash)
 	if err != nil {
 		bp.recoverProvingStatus(ctx, batchTask)
-		return nil, fmt.Errorf("format prover failure, id:%s error:%w", batchTask.Hash, err)
+		log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	bp.batchTaskGetTaskTotal.Inc()
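Note the changed contract of `Assign` in the attempts-exceeded branch: it now returns `(nil, nil)`, meaning no task and no error, so the HTTP layer can report "no task available" instead of an internal failure. Callers therefore have to treat a nil task as a normal outcome; a hedged sketch of the calling side:

```go
// Illustrative caller honoring the new contract; the respond* helpers are
// placeholders, not this repo's API.
task, err := bp.Assign(ctx, &getTaskParameter)
if err != nil {
	respondInternalError(ctx, err) // sentinel error; details already logged
	return
}
if task == nil {
	respondEmpty(ctx) // attempts exhausted or queue empty: a normal outcome
	return
}
respondTask(ctx, task)
```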
@@ -22,6 +22,9 @@ import (
 	coordinatorType "scroll-tech/coordinator/internal/types"
 )
 
+// ErrCoordinatorInternalFailure coordinator internal db failure
+var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
+
 // ChunkProverTask the chunk prover task
 type ChunkProverTask struct {
 	BaseProverTask
@@ -94,7 +97,8 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	// load and send chunk tasks
 	chunkTasks, err := cp.chunkOrm.UpdateUnassignedChunkReturning(ctx, getTaskParameter.ProverHeight, 1)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get unassigned chunk proving tasks, error:%w", err)
+		log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", err)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	if len(chunkTasks) == 0 {
@@ -102,7 +106,8 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}
 
 	if len(chunkTasks) != 1 {
-		return nil, fmt.Errorf("get unassigned chunk proving task len not 1, chunk tasks:%v", chunkTasks)
+		log.Error("get unassigned chunk proving task len not 1", "length", len(chunkTasks), "chunk tasks", chunkTasks)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	chunkTask := chunkTasks[0]
@@ -111,7 +116,9 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 
 	if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
 		cp.chunkAttemptsExceedTotal.Inc()
-		return nil, fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
+		// TODO: retry fetching unassigned chunk proving task
+		log.Error("chunk task proving attempts reach the maximum", "hash", chunkTask.Hash)
+		return nil, nil
 	}
 
 	proverTask := orm.ProverTask{
@@ -127,13 +134,15 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}
 	if err = cp.proverTaskOrm.SetProverTask(ctx, &proverTask); err != nil {
 		cp.recoverProvingStatus(ctx, chunkTask)
-		return nil, fmt.Errorf("db set session info fail, session id:%s , public key:%s, err:%w", chunkTask.Hash, publicKey, err)
+		log.Error("db set session info fail", "task hash", chunkTask.Hash, "prover name", proverName.(string), "prover pubKey", publicKey.(string), "err", err)
+		return nil, ErrCoordinatorInternalFailure
 	}
 
 	taskMsg, err := cp.formatProverTask(ctx, chunkTask.Hash)
 	if err != nil {
 		cp.recoverProvingStatus(ctx, chunkTask)
-		return nil, fmt.Errorf("format prover task failure, id:%s error:%w", chunkTask.Hash, err)
+		log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
+		return nil, ErrCoordinatorInternalFailure
	}
 
 	cp.chunkTaskGetTaskTotal.Inc()
@@ -34,6 +34,12 @@ var (
 	ErrValidatorFailureProofTimeout = errors.New("validator failure submit proof timeout")
 	// ErrValidatorFailureTaskHaveVerifiedSuccess have proved success and verified success
 	ErrValidatorFailureTaskHaveVerifiedSuccess = errors.New("validator failure chunk/batch have proved and verified success")
+	// ErrValidatorFailureVerifiedFailed failed to verify and the verifier returns error
+	ErrValidatorFailureVerifiedFailed = fmt.Errorf("verification failed, verifier returns error")
+	// ErrValidatorSuccessInvalidProof successfully verified and the proof is invalid
+	ErrValidatorSuccessInvalidProof = fmt.Errorf("verification succeeded, it's an invalid proof")
+	// ErrCoordinatorInternalFailure coordinator internal db failure
+	ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error")
 )
 
 // ProofReceiverLogic the proof receiver logic
@@ -162,10 +168,10 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
 		log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
 			"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
 
-		if verifyErr == nil {
-			verifyErr = fmt.Errorf("verification succeeded and it's an invalid proof")
+		if verifyErr != nil {
+			return ErrValidatorFailureVerifiedFailed
 		}
-		return verifyErr
+		return ErrValidatorSuccessInvalidProof
 	}
 
 	m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds())
@@ -176,7 +182,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
 	if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg, proofTimeSec); err != nil {
 		m.proofSubmitFailure.Inc()
 		m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
-		return err
+		return ErrCoordinatorInternalFailure
 	}
 
 	return nil
@@ -377,7 +383,7 @@ func (m *ProofReceiverLogic) processProverErr(ctx context.Context, taskID, pk st
 		log.Error("update prover task proving status failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", updateErr)
 	}
 
-	proverTasks, err := m.proverTaskOrm.GetValidOrAssignedTaskOfOtherProvers(ctx, taskType, taskID, pk)
+	proverTasks, err := m.proverTaskOrm.GetAssignedTaskOfOtherProvers(ctx, taskType, taskID, pk)
 	if err != nil {
 		log.Warn("checkIsAssignedToOtherProver failure", "taskID", taskID, "proverPublicKey", pk, "taskType", taskType, "error", err)
 		return
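With verification outcomes now mapped onto fixed sentinel errors, callers can branch with `errors.Is` instead of matching error strings. A small in-package sketch (the returned strings and the `errors` import are illustrative):

```go
// classify maps HandleZkProof's sentinel errors to coarse outcomes.
func classify(err error) string {
	switch {
	case err == nil:
		return "proof accepted"
	case errors.Is(err, ErrValidatorFailureVerifiedFailed),
		errors.Is(err, ErrValidatorSuccessInvalidProof):
		return "proof rejected" // prover-side problem
	case errors.Is(err, ErrCoordinatorInternalFailure):
		return "retry later" // coordinator-side problem
	default:
		return "unknown failure"
	}
}
```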
@@ -91,6 +91,26 @@ func (o *Batch) GetUnassignedBatches(ctx context.Context, limit int) ([]*Batch,
 	return batches, nil
 }
 
+// GetUnassignedAndChunksUnreadyBatches get the batches which are unassigned and whose chunks are not ready
+func (o *Batch) GetUnassignedAndChunksUnreadyBatches(ctx context.Context, offset, limit int) ([]*Batch, error) {
+	if offset < 0 || limit < 0 {
+		return nil, errors.New("limit and offset must not be smaller than 0")
+	}
+
+	db := o.db.WithContext(ctx)
+	db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
+	db = db.Where("chunk_proofs_status = ?", types.ChunkProofsStatusPending)
+	db = db.Order("index ASC")
+	db = db.Offset(offset)
+	db = db.Limit(limit)
+
+	var batches []*Batch
+	if err := db.Find(&batches).Error; err != nil {
+		return nil, fmt.Errorf("Batch.GetUnassignedAndChunksUnreadyBatches error: %w", err)
+	}
+	return batches, nil
+}
+
 // GetAssignedBatches retrieves all batches whose proving_status is types.ProvingTaskAssigned.
 func (o *Batch) GetAssignedBatches(ctx context.Context) ([]*Batch, error) {
 	db := o.db.WithContext(ctx)
@@ -234,7 +234,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
 	var totalL1CommitGas uint64
 	for _, block := range chunk.Blocks {
 		totalL2TxGas += block.Header.GasUsed
-		totalL2TxNum += block.L2TxsNum()
+		totalL2TxNum += block.NumL2Transactions()
 		totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
 		totalL1CommitGas += block.EstimateL1CommitGas()
 	}
@@ -131,14 +131,14 @@ func (o *ProverTask) GetProverTaskByTaskIDAndProver(ctx context.Context, taskTyp
 	return &proverTask, nil
 }
 
-// GetValidOrAssignedTaskOfOtherProvers get the chunk/batch task assigned other provers
-func (o *ProverTask) GetValidOrAssignedTaskOfOtherProvers(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey string) ([]ProverTask, error) {
+// GetAssignedTaskOfOtherProvers get the chunk/batch task assigned to other provers
+func (o *ProverTask) GetAssignedTaskOfOtherProvers(ctx context.Context, taskType message.ProofType, taskID, proverPublicKey string) ([]ProverTask, error) {
 	db := o.db.WithContext(ctx)
 	db = db.Model(&ProverTask{})
 	db = db.Where("task_type", int(taskType))
 	db = db.Where("task_id", taskID)
 	db = db.Where("prover_public_key != ?", proverPublicKey)
-	db = db.Where("proving_status in (?)", []int{int(types.ProverAssigned), int(types.ProverProofValid)})
+	db = db.Where("proving_status = ?", int(types.ProverAssigned))
 
 	var proverTasks []ProverTask
 	if err := db.Find(&proverTasks).Error; err != nil {
@@ -110,7 +110,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
 }
 
 func setEnv(t *testing.T) {
-	version.Version = "v4.1.97-aaa-bbb-ccc"
+	version.Version = "v4.1.98-aaa-bbb-ccc"
 
 	base = docker.NewDockerApp()
 	base.RunDBImage(t)
@@ -113,7 +113,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
 	var totalL1CommitGas uint64
 	for _, block := range chunk.Blocks {
 		totalL2TxGas += block.Header.GasUsed
-		totalL2TxNum += block.L2TxsNum()
+		totalL2TxNum += block.NumL2Transactions()
 		totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
 		totalL1CommitGas += block.EstimateL1CommitGas()
 	}